diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..602b330edf --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,25 @@ + + +**Is this a BUG REPORT or FEATURE REQUEST?**: + +> Uncomment only one, leave it on its own line: +> +> /kind bug +> /kind feature + + +**What happened**: + +**What you expected to happen**: + +**How to reproduce it (as minimally and precisely as possible)**: + + +**Anything else we need to know?**: + +**Environment**: +- Kubernetes version (use `kubectl version`): +- Kube-state-metrics image version diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..ae0e2b9883 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,12 @@ + + +**What this PR does / why we need it**: + +**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: +Fixes # + diff --git a/.gitignore b/.gitignore index f3dd882a25..13bdd05a6d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ kube-state-metrics +documented_metrics +tested_metrics # Created by https://www.gitignore.io/api/go diff --git a/.travis.yml b/.travis.yml index 6420b3ca27..e71388684c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,19 +2,22 @@ sudo: required language: go +go: + - "1.11.4" + install: - mkdir -p $HOME/gopath/src/k8s.io - mv $TRAVIS_BUILD_DIR $HOME/gopath/src/k8s.io/kube-state-metrics jobs: include: - - stage: Go fmt - script: make gofmtcheck - - stage: Unit Test - script: make test-unit - - stage: Build - script: make build - - stage: Check that all metrics are documented - script: make doccheck - - stage: E2e - script: make e2e + # Go fmt + - script: make gofmtcheck + # Check that all metrics are documented + - script: make doccheck + # Unit Test + - script: make test-unit + # Build + - script: make build + # E2e + - script: make e2e diff --git a/CHANGELOG.md b/CHANGELOG.md index 9faf7d68a3..27537f8eac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,49 @@ +## v1.5.0 / 2019-01-10 + +After a testing period of 30 days, there were no additional bugs found or features introduced. Due to no bugs being reported over a total period of 41 days, we feel no further pre-releases are necessary for a stable release. + +This release focused on a large architectural change to drastically improve the performance and resource usage of kube-state-metrics. Special thanks to @mxinden for his hard work on this! See the changelog of the pre-releases for more detailed information and related pull requests. + +An additional change has been requested to be listed in the release notes: + +* [CHANGE] Due to the removal of the surrounding mechanism, the `ksm_resources_per_scrape` and `ksm_scrape_error_total` metrics no longer exist. + +## v1.5.0-beta.0 / 2018-12-11 + +After a testing period of 11 days, there were no additional bugs found or features introduced. + +## v1.5.0-alpha.0 / 2018-11-30 + +* [CHANGE] Disable gzip compression of kube-state-metrics responses by default. Can be re-enabled via `--enable-gzip-encoding`. See #563 for more details. +* [FEATURE] Add `kube_replicaset_owner` metric (#520). +* [FEATURE] Add `kube_pod_container_status_last_terminated_reason` metric (#535). +* [FEATURE] Add `stateful_set_status.{current,update}_revision` metric (#545). +* [FEATURE] Add pod disruption budget collector (#551). +* [FEATURE] Make kube-state-metrics usable as a library (#575). 
+* [FEATURE] Add `kube_service_spec_external_ip` metric and add `external_name` and `load_balancer_ip` labels to the `kube_service_info` metric (#571). +* [ENHANCEMENT] Add uid info to the `kube_pod_info` metric (#508). +* [ENHANCEMENT] Update addon-resizer to 1.8.3 and increase resource limits (#552). +* [ENHANCEMENT] Improve metric caching and rendering performance (#498). +* [ENHANCEMENT] Add `CreateContainerConfigError` as a possible reason for a container not starting (#578). + +## v1.4.0 / 2018-08-22 + +After a testing period of 16 days, there were no additional bugs found or features introduced. + +## v1.4.0-rc.0 / 2018-08-06 + +* [CHANGE] `kube_job_status_start_time` and `kube_job_status_completion_time` metric types changed from counter to gauge. +* [CHANGE] Rename the `job` label to `job_name` as it collides with the Prometheus `job` label. +* [FEATURE] Allow white- and black-listing metrics to be exposed. +* [FEATURE] Add `kube_node_status_capacity` and `kube_node_status_allocatable` metrics. +* [FEATURE] Add `kube_pod_status_scheduled_time` metric. +* [FEATURE] Add `kube_pod_container_status_waiting_reason` and `kube_pod_container_status_terminated_reason` metrics. +* [ENHANCEMENT] Add generic resource metrics for Pods, `kube_pod_container_resource_requests` and `kube_pod_container_resource_limits`. This deprecates the old resource metrics for Pods. +* [ENHANCEMENT] Prefer protobuf over json when communicating with the Kubernetes API. +* [ENHANCEMENT] Add dynamic volume support. +* [ENHANCEMENT] Properly set kube-state-metrics user agent when performing requests against the Kubernetes API. +* [BUGFIX] Fix incorrect HPA metric labels. + ## v1.3.1 / 2018-04-12 * [BUGFIX] Use Go 1.10.1 fixing TLS and memory issues. @@ -9,6 +55,10 @@ After a testing period of 12 days, there were no additional bugs found or features introduced. ## v1.3.0-rc.0 / 2018-03-23 +* [CHANGE] Removed `--in-cluster` flag in [#371](https://github.com/kubernetes/kube-state-metrics/pull/371). + Users can no longer specify `--apiserver` with `--in-cluster=true`. To + emulate this behaviour in future releases, set the `KUBERNETES_SERVICE_HOST` + environment variable to the value of the `--apiserver` argument. +* [FEATURE] Allow specifying multiple namespaces. +* [FEATURE] Add `kube_pod_completion_time`, `kube_pod_spec_volumes_persistentvolumeclaims_info`, and `kube_pod_spec_volumes_persistentvolumeclaims_readonly` metrics to the Pod collector. +* [FEATURE] Add `kube_node_spec_taint` metric. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..5e248fdd7f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) - Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + \ No newline at end of file diff --git a/Documentation/README.md b/Documentation/README.md index 05cdb97774..4bf47fd6c7 100644 --- a/Documentation/README.md +++ b/Documentation/README.md @@ -4,6 +4,41 @@ This documentation is intended to be a complete reflection of the current state Any contribution to improving this documentation or adding sample usages will be appreciated. +## Table of Contents + +- [Metrics Stages](#metrics-stages) +- [Metrics Deprecation](#metrics-deprecation) +- [Exposed Metrics](#exposed-metrics) +- [Join Metrics](#join-metrics) + +## Metrics Stages +Metrics are grouped into three stages: + +| Stage | Description | +| ----------- | ----------- | +| EXPERIMENTAL | Metrics which normally correspond to Kubernetes API object alpha status or spec fields and can be changed at any time. | +| STABLE | Metrics which should have very few backwards-incompatible changes outside of major version updates. | +| DEPRECATED | Metrics which will be removed once the deprecation timeline is met. | + +## Metrics Deprecation +* **The following non-generic resource metrics for pods are marked deprecated. They will be removed in kube-state-metrics v2.0.0.** +`kube_pod_container_resource_requests` and `kube_pod_container_resource_limits` are the replacements, with `resource` labels +representing the resource name and `unit` labels representing the resource unit. + * kube_pod_container_resource_requests_cpu_cores + * kube_pod_container_resource_limits_cpu_cores + * kube_pod_container_resource_requests_memory_bytes + * kube_pod_container_resource_limits_memory_bytes +* **The following non-generic resource metrics for nodes are marked deprecated. They will be removed in kube-state-metrics v2.0.0.** +`kube_node_status_capacity` and `kube_node_status_allocatable` are the replacements, with `resource` labels +representing the resource name and `unit` labels representing the resource unit. + * kube_node_status_capacity_pods + * kube_node_status_capacity_cpu_cores + * kube_node_status_capacity_memory_bytes + * kube_node_status_allocatable_pods + * kube_node_status_allocatable_cpu_cores + * kube_node_status_allocatable_memory_bytes + +## Exposed Metrics There is one file per group of metrics. 
See each file for specific documentation about the exposed metrics: * [CronJob Metrics](cronjob-metrics.md) @@ -15,6 +50,7 @@ * [PersistentVolume Metrics](persistentvolume-metrics.md) * [PersistentVolumeClaim Metrics](persistentvolumeclaim-metrics.md) * [Pod Metrics](pod-metrics.md) +* [Pod Disruption Budget Metrics](poddisruptionbudget-metrics.md) * [ReplicaSet Metrics](replicaset-metrics.md) * [ReplicationController Metrics](replicationcontroller-metrics.md) * [ResourceQuota Metrics](resourcequota-metrics.md) @@ -24,4 +60,17 @@ * [Horizontal Pod Autoscaler Metrics](horizontalpodautoscaler-metrics.md) * [Endpoint Metrics](endpoint-metrics.md) * [Secret Metrics](secret-metrics.md) -* [ConfigMap Metrics](configmap-metrics.md) \ No newline at end of file +* [ConfigMap Metrics](configmap-metrics.md) + + +## Join Metrics +When an additional label that is not provided by default is needed, a [Prometheus matching operator](https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching) +can be used to extend the output of a single metric. + +This example adds `label_release` to the set of default labels of the `kube_pod_status_ready` metric +and allows you to select or group the metrics by the Helm release label: + +``` +kube_pod_status_ready * on (namespace, pod) group_left(label_release) kube_pod_labels +``` + diff --git a/Documentation/configmap-metrics.md b/Documentation/configmap-metrics.md index faba0e4818..5fdf12d9a7 100644 --- a/Documentation/configmap-metrics.md +++ b/Documentation/configmap-metrics.md @@ -1,7 +1,7 @@ # ConfigMap Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_configmap_info | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace> | -| kube_configmap_created | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace> | -| kube_configmap_metadata_resource_version | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace>
`resource_version`=<secret-resource-version> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_configmap_info | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace> | STABLE | +| kube_configmap_created | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace> | STABLE | +| kube_configmap_metadata_resource_version | Gauge | `configmap`=<configmap-name>
`namespace`=<configmap-namespace>
`resource_version`=<configmap-resource-version> | STABLE | diff --git a/Documentation/cronjob-metrics.md b/Documentation/cronjob-metrics.md index 67988635a9..2ae5d0ca3c 100644 --- a/Documentation/cronjob-metrics.md +++ b/Documentation/cronjob-metrics.md @@ -1,12 +1,12 @@ # CronJob Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_cronjob_info | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace>
`schedule`=<schedule>
`concurrency_policy`=<concurrency-policy> | -| kube_cronjob_labels | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace>
`label_CRONJOB_LABEL`=<CRONJOB_LABEL> | -| kube_cronjob_created | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | -| kube_cronjob_next_schedule_time | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | -| kube_cronjob_status_active | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | -| kube_cronjob_status_last_schedule_time | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | -| kube_cronjob_spec_suspend | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | -| kube_cronjob_spec_starting_deadline_seconds | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_cronjob_info | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace>
`schedule`=<schedule>
`concurrency_policy`=<concurrency-policy> | STABLE +| kube_cronjob_labels | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace>
`label_CRONJOB_LABEL`=<CRONJOB_LABEL> | STABLE +| kube_cronjob_created | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE +| kube_cronjob_next_schedule_time | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE +| kube_cronjob_status_active | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE +| kube_cronjob_status_last_schedule_time | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE +| kube_cronjob_spec_suspend | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE +| kube_cronjob_spec_starting_deadline_seconds | Gauge | `cronjob`=<cronjob-name>
`namespace`=<cronjob-namespace> | STABLE diff --git a/Documentation/daemonset-metrics.md b/Documentation/daemonset-metrics.md index 2489fb223a..d974d22f1b 100644 --- a/Documentation/daemonset-metrics.md +++ b/Documentation/daemonset-metrics.md @@ -1,14 +1,14 @@ # DaemonSet Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_daemonset_created | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_current_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_desired_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_number_available | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_number_misscheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_number_ready | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_status_number_unavailable | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_updated_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_metadata_generation | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | -| kube_daemonset_labels | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace>
`label_DAEMONSET_LABEL`=<DAEMONSET_LABEL> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_daemonset_created | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_current_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_desired_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_number_available | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_number_misscheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_number_ready | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_status_number_unavailable | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_updated_number_scheduled | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_metadata_generation | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace> | STABLE | +| kube_daemonset_labels | Gauge | `daemonset`=<daemonset-name>
`namespace`=<daemonset-namespace>
`label_DAEMONSET_LABEL`=<DAEMONSET_LABEL> | STABLE | diff --git a/Documentation/deployment-metrics.md b/Documentation/deployment-metrics.md index e186d65a57..a9204739a5 100644 --- a/Documentation/deployment-metrics.md +++ b/Documentation/deployment-metrics.md @@ -1,16 +1,16 @@ # Deployment Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_deployment_status_replicas | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_status_replicas_available | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_status_replicas_unavailable | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_status_replicas_updated | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_status_observed_generation | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_spec_replicas | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_spec_paused | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_spec_strategy_rollingupdate_max_unavailable | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_spec_strategy_rollingupdate_max_surge | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_metadata_generation | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_labels | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | -| kube_deployment_created | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_deployment_status_replicas | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_status_replicas_available | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_status_replicas_unavailable | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_status_replicas_updated | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_status_observed_generation | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_spec_replicas | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_spec_paused | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_spec_strategy_rollingupdate_max_unavailable | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_spec_strategy_rollingupdate_max_surge | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_metadata_generation | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_labels | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | +| kube_deployment_created | Gauge | `deployment`=<deployment-name>
`namespace`=<deployment-namespace> | STABLE | diff --git a/Documentation/design/metrics-store-performance-optimization.md b/Documentation/design/metrics-store-performance-optimization.md new file mode 100644 index 0000000000..74551bcdc8 --- /dev/null +++ b/Documentation/design/metrics-store-performance-optimization.md @@ -0,0 +1,177 @@ +# Kube-State-Metrics - Performance Optimization Proposal + + +--- + +Author: Max Inden (IndenML@gmail.com) + +Date: 23. July 2018 + +Target release: v1.5.0 + +--- + + +## Glossary + +- kube-state-metrics: “Simple service that listens to the Kubernetes API server + and generates metrics about the state of the objects” + +- Time series: A single line in a /metrics response e.g. + “metric_name{label="value"} 1” + + +## Problem Statement + +There have been repeated reports of two issues when running kube-state-metrics on +production Kubernetes clusters. First, kube-state-metrics takes a long time +(“10s - 20s”) to respond on its /metrics endpoint, leading to Prometheus +instances dropping the scrape request and marking the given time series +as stale. Second, kube-state-metrics uses a lot of memory and is thereby +out-of-memory killed due to too-low Kubernetes resource limits. + + +## Goal + +The goal of this proposal can be split into the following sub-goals ordered by +their priority: + +1. Decrease response time on /metrics endpoint + +2. Decrease overall runtime memory usage + + +## Status Quo + +Instead of requesting the needed information from the Kubernetes API-Server on +demand (on scrape), kube-state-metrics uses the Kubernetes client-go cache tool +to keep a full in-memory representation of all Kubernetes objects of a given +cluster. Using the cache speeds up the performance-critical path of replying to +a scrape request, and reduces the load on the Kubernetes API-Server by only +sending deltas whenever they occur. Kube-state-metrics does not make use of all +properties and sub-objects of these Kubernetes objects that it stores in its +cache. + +On a scrape request by e.g. Prometheus on the /metrics endpoint, +kube-state-metrics calculates the configured time series on demand based on the +objects in its cache and converts them to the Prometheus string representation. + + +## Proposal + +Instead of a full representation of all Kubernetes objects with all their +properties in memory via the Kubernetes client-go cache, use a map, addressable +by the Kubernetes object uuid, containing all time series of that object as a +single multi-line string. + +``` +var cache = map[uuid][]byte{} +``` + +Kube-state-metrics listens on add, update and delete events via Kubernetes +client-go reflectors. On add and update events kube-state-metrics generates all +time series related to the Kubernetes object based on the event’s payload, +concatenates the time series to a single byte slice and sets / replaces the byte +slice in the store at the uuid of the Kubernetes object. One can precompute the +length of a time series byte slice before allocation as the sum of the lengths of +the metric name, label keys and values as well as the metric value in string +representation. On delete events kube-state-metrics deletes the uuid entry of +the given Kubernetes object in the cache map. + +On a scrape request on the /metrics endpoint, kube-state-metrics iterates over +the cache map and concatenates all time series string blobs into a single +string, which is finally passed on as a response.
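+
+To make the proposal concrete, here is a minimal Go sketch of such a store. All
+names (`metricsStore`, `Add`, `Delete`, `WriteAll`) are illustrative placeholders,
+not the final implementation; rendering an object into its time series blob is
+left abstract. The sequence diagram below shows the same flow end to end.
+
+```go
+package store
+
+import (
+	"io"
+	"sync"
+)
+
+// metricsStore maps a Kubernetes object UID to all time series of
+// that object, pre-rendered as a single multi-line blob.
+type metricsStore struct {
+	mtx   sync.RWMutex
+	cache map[string][]byte
+}
+
+func newMetricsStore() *metricsStore {
+	return &metricsStore{cache: map[string][]byte{}}
+}
+
+// Add handles add and update events: the blob is rendered once per
+// event and replaces any previous blob stored under the object's UID.
+func (s *metricsStore) Add(uid string, blob []byte) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	s.cache[uid] = blob
+}
+
+// Delete handles delete events by dropping the object's entry.
+func (s *metricsStore) Delete(uid string) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	delete(s.cache, uid)
+}
+
+// WriteAll streams the concatenation of all cached blobs to w; no
+// time series is computed on the scrape path.
+func (s *metricsStore) WriteAll(w io.Writer) error {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+	for _, blob := range s.cache {
+		if _, err := w.Write(blob); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```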
+ +``` + +---------------+ +-----------+ +---------------+ +-------------------+ + | pod_reflector | | pod_store | | pod_collector | | metrics_endpoint | + +---------------+ +-----------+ +---------------+ +-------------------+ +-------------\ | | | | +| new pod p1 |-| | | | +|------------| | | | | + | | | | + | Add(p1) | | | + |-------------->| | | + | | ----------------------\ | | + | |-| generateMetrics(p1) | | | + | | |---------------------| | | + | | | | + | nil | | | + |<--------------| | | + | | | | ---------------\ + | | | |-| GET /metrics | + | | | | |--------------| + | | | | + | | | Collect() | + | | |<--------------------------| + | | | | + | | GetAll() | | + | |<------------------------------| | + | | | | + | | []string{metrics} | | + | |------------------------------>| | + | | | | + | | | concat(metrics) | + | | |-------------------------->| + | | | | + +``` + +
+ Code to reproduce diagram + +Build via [text-diagram](http://weidagang.github.io/text-diagram/) + +``` +object pod_reflector pod_store pod_collector metrics_endpoint + +note left of pod_reflector: new pod p1 +pod_reflector -> pod_store: Add(p1) +note right of pod_store: generateMetrics(p1) +pod_store -> pod_reflector: nil + +note right of metrics_endpoint: GET /metrics +metrics_endpoint -> pod_collector: Collect() +pod_collector -> pod_store: GetAll() +pod_store -> pod_collector: []string{metrics} +pod_collector -> metrics_endpoint: concat(metrics) +``` + +
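+
+As a follow-up illustration, the scrape path from the diagram could be wired up
+roughly as below. This again builds on the placeholder `metricsStore` sketch
+from above and is not the final implementation:
+
+```go
+package store
+
+import (
+	"log"
+	"net/http"
+)
+
+// serveMetrics exposes the store on /metrics. A scrape is answered by
+// streaming the pre-rendered blobs; nothing is computed per request.
+func serveMetrics(s *metricsStore) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.Header().Set("Content-Type", "text/plain; version=0.0.4")
+		if err := s.WriteAll(w); err != nil {
+			log.Printf("failed to write /metrics response: %v", err)
+		}
+	})
+}
+```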
+ + +## FAQ / Follow up improvements + +- If kube-state-metrics only listens on add, update and delete events, how is it + aware of already existing Kubernetes objects created before kube-state-metrics + was started? Leveraging Kubernetes client-go, reflectors can initialize all + existing objects before any add, update or delete events. To ensure no events + are missed in the long run, periodic resyncs via Kubernetes client-go can be + triggered. This extra confidence is not a must and should be compared to its + costs, as Kubernetes client-go already gives decent guarantees on event + delivery. + +- What about metadata (HELP and description) in the /metrics output? As a first + iteration they would be skipped until we have a better idea on the design. + +- How can the cache map be concurrently accessed? The core Golang map + implementation is not thread-safe. As a first iteration a simple mutex should + be sufficient. Golang's sync.Map might be considered. + +- To solve the problem of out-of-order events sent by the Kubernetes API-Server + to kube-state-metrics, it can keep the Kubernetes resource version alongside + each blob of time series inside the cache map. On add and update events, first + compare the resource version of the event with the resource version in the cache. + Only move forward if the former is higher than the latter. + +- In case the memory consumption of the time series string blobs is a problem, + the following optimization can be considered: Among the time series strings, + multiple sub-strings will be heavily duplicated, like the metric name. Instead + of saving unstructured strings inside the cache map, one can structure them, + using pointers to deduplicate e.g. metric names. + +- ... + +- Kube-state-metrics does not make use of all properties of all Kubernetes + objects. Instead of unmarshalling unused properties, their json struct tags or + their Protobuf representation could be removed. diff --git a/Documentation/endpoint-metrics.md b/Documentation/endpoint-metrics.md index c2115f689d..c2122b4d08 100644 --- a/Documentation/endpoint-metrics.md +++ b/Documentation/endpoint-metrics.md @@ -1,9 +1,9 @@ # Endpoint Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_endpoint_address_not_ready | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | -| kube_endpoint_address_available | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | -| kube_endpoint_info | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | -| kube_endpoint_labels | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace>
`label_endpoint_LABEL`=<endpoint_LABEL> | -| kube_endpoint_created | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_endpoint_address_not_ready | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | STABLE | +| kube_endpoint_address_available | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | STABLE | +| kube_endpoint_info | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | STABLE | +| kube_endpoint_labels | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace>
`label_endpoint_LABEL`=<endpoint_LABEL> | STABLE | +| kube_endpoint_created | Gauge | `endpoint`=<endpoint-name>
`namespace`=<endpoint-namespace> | STABLE | diff --git a/Documentation/horizontalpodautoscaler-metrics.md b/Documentation/horizontalpodautoscaler-metrics.md index c194dc1db4..5ea168c06b 100644 --- a/Documentation/horizontalpodautoscaler-metrics.md +++ b/Documentation/horizontalpodautoscaler-metrics.md @@ -1,9 +1,9 @@ # Horizontal Pod Autoscaler Metrics -| Metic name | Metric type | Labels/tags | -| -------------------------------- | ----------- | ------------------------------------------------------------- | -| kube_hpa_metadata_generation | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | -| kube_hpa_spec_max_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | -| kube_hpa_spec_min_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | -| kube_hpa_status_current_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | -| kube_hpa_status_desired_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | +| Metric name | Metric type | Labels/tags | Status | +| -------------------------------- | ----------- | ------------------------------------------------------------- | ----------- | +| kube_hpa_metadata_generation | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | STABLE | +| kube_hpa_spec_max_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | STABLE | +| kube_hpa_spec_min_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | STABLE | +| kube_hpa_status_current_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | STABLE | +| kube_hpa_status_desired_replicas | Gauge | `hpa`=<hpa-name>
`namespace`=<hpa-namespace> | STABLE | diff --git a/Documentation/job-metrics.md b/Documentation/job-metrics.md index 54d5e2f292..9aec6cc61b 100644 --- a/Documentation/job-metrics.md +++ b/Documentation/job-metrics.md @@ -1,17 +1,17 @@ # Job Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_job_info | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_labels | Gauge | `job`=<job-name>
`namespace`=<job-namespace>
`label_JOB_LABEL`=<JOB_LABEL> | -| kube_job_spec_parallelism | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_spec_completions | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_spec_active_deadline_seconds | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_status_active | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_status_succeeded | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_status_failed | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_status_start_time | Counter | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_status_completion_time | Counter | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_complete | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_failed | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | -| kube_job_created | Gauge | `job`=<job-name>
`namespace`=<job-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_job_info | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_labels | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace>
`label_JOB_LABEL`=<JOB_LABEL> | STABLE | +| kube_job_spec_parallelism | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_spec_completions | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_spec_active_deadline_seconds | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_status_active | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_status_succeeded | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_status_failed | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_status_start_time | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_status_completion_time | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_complete | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_failed | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | +| kube_job_created | Gauge | `job_name`=<job-name>
`namespace`=<job-namespace> | STABLE | diff --git a/Documentation/limitrange-metrics.md b/Documentation/limitrange-metrics.md index 13a32ab0a9..78ba1a844f 100644 --- a/Documentation/limitrange-metrics.md +++ b/Documentation/limitrange-metrics.md @@ -1,6 +1,6 @@ # LimitRange Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_limitrange | Gauge | `limitrange`=<limitrange-name>
`namespace`=<namespace>
`resource`=<ResourceName>
`type`=<Pod\|Container\|PersistentVolumeClaim>
`constraint`=<constraint>| -| kube_limitrange_created | Gauge | `limitrange`=<limitrange-name>
`namespace`=<namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_limitrange | Gauge | `limitrange`=<limitrange-name>
`namespace`=<namespace>
`resource`=<ResourceName>
`type`=<Pod\|Container\|PersistentVolumeClaim>
`constraint`=<constraint>| STABLE | +| kube_limitrange_created | Gauge | `limitrange`=<limitrange-name>
`namespace`=<namespace> | STABLE | diff --git a/Documentation/namespace-metrics.md b/Documentation/namespace-metrics.md index 848eec8f84..2094b15ab5 100644 --- a/Documentation/namespace-metrics.md +++ b/Documentation/namespace-metrics.md @@ -1,8 +1,8 @@ # Namespace Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_namespace_status_phase| Gauge | `namespace`=<namespace-name>
`status`=<Active\|Terminating> | -| kube_namespace_labels | Gauge | `namespace`=<namespace-name>
`label_NS_LABEL`=<NS_LABEL> | -| kube_namespace_annotations | Gauge | `namespace`=<namespace-name>
`annotation_NS_ANNOTATION`=<NS_ANNOTATION> | -| kube_namespace_created | Gauge | `namespace`=<namespace-name> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_namespace_status_phase| Gauge | `namespace`=<namespace-name>
`status`=<Active\|Terminating> | STABLE | +| kube_namespace_labels | Gauge | `namespace`=<namespace-name>
`label_NS_LABEL`=<NS_LABEL> | STABLE | +| kube_namespace_annotations | Gauge | `namespace`=<namespace-name>
`annotation_NS_ANNOTATION`=<NS_ANNOTATION> | STABLE | +| kube_namespace_created | Gauge | `namespace`=<namespace-name> | STABLE | diff --git a/Documentation/node-metrics.md b/Documentation/node-metrics.md index f2ebc813c6..afa1650504 100644 --- a/Documentation/node-metrics.md +++ b/Documentation/node-metrics.md @@ -1,19 +1,19 @@ # Node Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_node_info | Gauge | `node`=<node-address>
`kernel_version`=<kernel-version>
`os_image`=<os-image-name>
`container_runtime_version`=<container-runtime-and-version-combination>
`kubelet_version`=<kubelet-version>
`kubeproxy_version`=<kubeproxy-version> | -| kube_node_labels | Gauge | `node`=<node-address>
`label_NODE_LABEL`=<NODE_LABEL> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_node_info | Gauge | `node`=<node-address>
`kernel_version`=<kernel-version>
`os_image`=<os-image-name>
`container_runtime_version`=<container-runtime-and-version-combination>
`kubelet_version`=<kubelet-version>
`kubeproxy_version`=<kubeproxy-version>
`provider_id`=<provider-id> | STABLE | +| kube_node_labels | Gauge | `node`=<node-address>
`label_NODE_LABEL`=<NODE_LABEL> | STABLE | | kube_node_spec_unschedulable | Gauge | `node`=<node-address>| STABLE | -| kube_node_spec_taint | Gauge | `node`=<node-address>
`key`=<taint-key>
`value=`<taint-value>
`effect=`<taint-effect> | -| kube_node_status_phase| Gauge | `node`=<node-address>
`phase`=<Pending\|Running\|Terminated> | -| kube_node_status_capacity_cpu_cores | Gauge | `node`=<node-address>| -| kube_node_status_capacity_nvidia_gpu_cards | Gauge | `node`=<node-address>| -| kube_node_status_capacity_memory_bytes | Gauge | `node`=<node-address>| -| kube_node_status_capacity_pods | Gauge | `node`=<node-address>| -| kube_node_status_allocatable_cpu_cores | Gauge | `node`=<node-address>| -| kube_node_status_allocatable_nvidia_gpu_cards | Gauge | `node`=<node-address>| -| kube_node_status_allocatable_memory_bytes | Gauge | `node`=<node-address>| -| kube_node_status_allocatable_pods | Gauge | `node`=<node-address>| -| kube_node_status_condition | Gauge | `node`=<node-address>
`condition`=<node-condition>
`status`=<true\|false\|unknown> | -| kube_node_created | Gauge | `node`=<node-address>| +| kube_node_spec_taint | Gauge | `node`=<node-address>
`key`=<taint-key>
`value`=<taint-value>
`effect`=<taint-effect> | STABLE | +| kube_node_status_phase | Gauge | `node`=<node-address>
`phase`=<Pending\|Running\|Terminated> | STABLE | +| kube_node_status_capacity | Gauge | `node`=<node-address>
`resource`=<resource-name>
`unit`=<resource-unit> | STABLE | +| kube_node_status_capacity_cpu_cores | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_capacity_memory_bytes | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_capacity_pods | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_allocatable | Gauge | `node`=<node-address>
`resource`=<resource-name>
`unit`=<resource-unit> | STABLE | +| kube_node_status_allocatable_cpu_cores | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_allocatable_memory_bytes | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_allocatable_pods | Gauge | `node`=<node-address>| STABLE | +| kube_node_status_condition | Gauge | `node`=<node-address>
`condition`=<node-condition>
`status`=<true\|false\|unknown> | STABLE | +| kube_node_created | Gauge | `node`=<node-address>| STABLE | diff --git a/Documentation/persistentvolume-metrics.md b/Documentation/persistentvolume-metrics.md index 7aa8517835..63ee1f16ee 100644 --- a/Documentation/persistentvolume-metrics.md +++ b/Documentation/persistentvolume-metrics.md @@ -1,8 +1,8 @@ -# PersistentVolumeClaim Metrics +# PersistentVolume Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_persistentvolume_status_phase | Gauge | `persistentvolume`=<pv-name>
`namespace`=<pv-namespace>
`phase`=<Bound\|Failed\|Pending\|Available\|Released>| -| kube_persistentvolume_labels | Gauge | `persistentvolume`=<persistentvolume-name>
`namespace`=<persistentvolume-namespace>
`label_PERSISTENTVOLUME_LABEL`=<PERSISTENTVOLUME_LABEL> | -| kube_persistentvolume_info | Gauge | `persistentvolume`=<pv-name>
`namespace`=<pv-namespace>
`storageclass`=<storageclass-name> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_persistentvolume_status_phase | Gauge | `persistentvolume`=<pv-name>
`phase`=<Bound\|Failed\|Pending\|Available\|Released>| STABLE | +| kube_persistentvolume_labels | Gauge | `persistentvolume`=<persistentvolume-name>
`label_PERSISTENTVOLUME_LABEL`=<PERSISTENTVOLUME_LABEL> | STABLE | +| kube_persistentvolume_info | Gauge | `persistentvolume`=<pv-name>
`storageclass`=<storageclass-name> | STABLE | diff --git a/Documentation/persistentvolumeclaim-metrics.md b/Documentation/persistentvolumeclaim-metrics.md index 9b56af7f37..58076f6821 100644 --- a/Documentation/persistentvolumeclaim-metrics.md +++ b/Documentation/persistentvolumeclaim-metrics.md @@ -1,11 +1,11 @@ # PersistentVolumeClaim Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_persistentvolumeclaim_info | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name>
`storageclass`=<persistentvolumeclaim-storageclassname>
`volumename`=<volumename> | -| kube_persistentvolumeclaim_labels | Gauge | `persistentvolumeclaim`=<persistentvolumeclaim-name>
`namespace`=<persistentvolumeclaim-namespace>
`label_PERSISTENTVOLUMECLAIM_LABEL`=<PERSISTENTVOLUMECLAIM_LABEL> | -| kube_persistentvolumeclaim_status_phase | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name>
`phase`=<Pending\|Bound\|Lost> | -| kube_persistentvolumeclaim_resource_requests_storage_bytes | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_persistentvolumeclaim_info | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name>
`storageclass`=<persistentvolumeclaim-storageclassname>
`volumename`=<volumename> | STABLE | +| kube_persistentvolumeclaim_labels | Gauge | `persistentvolumeclaim`=<persistentvolumeclaim-name>
`namespace`=<persistentvolumeclaim-namespace>
`label_PERSISTENTVOLUMECLAIM_LABEL`=<PERSISTENTVOLUMECLAIM_LABEL> | STABLE | +| kube_persistentvolumeclaim_status_phase | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name>
`phase`=<Pending\|Bound\|Lost> | STABLE | +| kube_persistentvolumeclaim_resource_requests_storage_bytes | Gauge | `namespace`=<persistentvolumeclaim-namespace>
`persistentvolumeclaim`=<persistentvolumeclaim-name> | STABLE | Note: diff --git a/Documentation/pod-metrics.md b/Documentation/pod-metrics.md index 5d050bb0c1..804c3fc9c5 100644 --- a/Documentation/pod-metrics.md +++ b/Documentation/pod-metrics.md @@ -1,29 +1,31 @@ # Pod Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_pod_info | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`host_ip`=<host-ip>
`pod_ip`=<pod-ip>
`node`=<node-name>
`created_by_kind`=<created_by_kind>
`created_by_name`=<created_by_name>
| +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_pod_info | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`host_ip`=<host-ip>
`pod_ip`=<pod-ip>
`node`=<node-name>
`created_by_kind`=<created_by_kind>
`created_by_name`=<created_by_name>
`uid`=<pod-uid>| STABLE | | kube_pod_start_time | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | -| kube_pod_completion_time | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace> | -| kube_pod_owner | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`owner_kind`=<owner kind>
`owner_name`=<owner name>
`owner_is_controller`=<whether owner is controller> | -| kube_pod_labels | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`label_POD_LABEL`=<POD_LABEL> | -| kube_pod_status_phase | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`phase`=<Pending\|Running\|Succeeded\|Failed\|Unknown> | -| kube_pod_status_ready | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`condition`=<true\|false\|unknown> | -| kube_pod_status_scheduled | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`condition`=<true\|false\|unknown> | -| kube_pod_container_info | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`image`=<image-name>
`image_id`=<image-id>
`container_id`=<containerid> | -| kube_pod_container_status_waiting | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | -| kube_pod_container_status_waiting_reason | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<ContainerCreating|CrashLoopBackOff|ErrImagePull|ImagePullBackOff> | -| kube_pod_container_status_running | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | -| kube_pod_container_status_terminated | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | -| kube_pod_container_status_terminated_reason | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<OOMKilled|Error|Completed|ContainerCannotRun> | -| kube_pod_container_status_ready | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | -| kube_pod_container_status_restarts_total | Counter | `container`=<container-name>
`namespace`=<pod-namespace>
`pod`=<pod-name> | -| kube_pod_container_resource_requests_cpu_cores | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | -| kube_pod_container_resource_requests_memory_bytes | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | -| kube_pod_container_resource_limits_cpu_cores | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | -| kube_pod_container_resource_limits_memory_bytes | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | -| kube_pod_container_resource_requests_nvidia_gpu_devices | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | -| kube_pod_container_resource_limits_nvidia_gpu_devices | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=< node-name> | +| kube_pod_completion_time | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | +| kube_pod_owner | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`owner_kind`=<owner kind>
`owner_name`=<owner name>
`owner_is_controller`=<whether owner is controller> | STABLE | +| kube_pod_labels | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`label_POD_LABEL`=<POD_LABEL> | STABLE | +| kube_pod_status_phase | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`phase`=<Pending\|Running\|Succeeded\|Failed\|Unknown> | STABLE | +| kube_pod_status_ready | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`condition`=<true\|false\|unknown> | STABLE | +| kube_pod_status_scheduled | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`condition`=<true\|false\|unknown> | STABLE | +| kube_pod_container_info | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`image`=<image-name>
`image_id`=<image-id>
`container_id`=<containerid> | STABLE | +| kube_pod_container_status_waiting | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | +| kube_pod_container_status_waiting_reason | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<ContainerCreating\|CrashLoopBackOff\|ErrImagePull\|ImagePullBackOff\|CreateContainerConfigError> | STABLE | +| kube_pod_container_status_running | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | +| kube_pod_container_status_terminated | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | +| kube_pod_container_status_terminated_reason | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<OOMKilled\|Error\|Completed\|ContainerCannotRun> | STABLE | +| kube_pod_container_status_last_terminated_reason | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`reason`=<OOMKilled\|Error\|Completed\|ContainerCannotRun> | STABLE | +| kube_pod_container_status_ready | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | +| kube_pod_container_status_restarts_total | Counter | `container`=<container-name>
`namespace`=<pod-namespace>
`pod`=<pod-name> | STABLE | +| kube_pod_container_resource_requests_cpu_cores | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | +| kube_pod_container_resource_requests | Gauge | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | +| kube_pod_container_resource_requests_memory_bytes | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | +| kube_pod_container_resource_limits_cpu_cores | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | +| kube_pod_container_resource_limits | Gauge | `resource`=<resource-name>
`unit`=<resource-unit>
`container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | +| kube_pod_container_resource_limits_memory_bytes | Gauge | `container`=<container-name>
`pod`=<pod-name>
`namespace`=<pod-namespace>
`node`=<node-name> | STABLE | | kube_pod_created | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | -| kube_pod_spec_volumes_persistentvolumeclaims_info | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname> | -| kube_pod_spec_volumes_persistentvolumeclaims_readonly | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname> | +| kube_pod_spec_volumes_persistentvolumeclaims_info | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname> | STABLE | +| kube_pod_spec_volumes_persistentvolumeclaims_readonly | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace>
`volume`=<volume-name>
`persistentvolumeclaim`=<persistentvolumeclaim-claimname> | STABLE | +| kube_pod_status_scheduled_time | Gauge | `pod`=<pod-name>
`namespace`=<pod-namespace> | STABLE | diff --git a/Documentation/poddisruptionbudget-metrics.md b/Documentation/poddisruptionbudget-metrics.md new file mode 100644 index 0000000000..ae5668f968 --- /dev/null +++ b/Documentation/poddisruptionbudget-metrics.md @@ -0,0 +1,10 @@ +# PodDisruptionBudget Metrics + +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_poddisruptionbudget_created | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE +| kube_poddisruptionbudget_status_current_healthy | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE +| kube_poddisruptionbudget_status_desired_healthy | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE +| kube_poddisruptionbudget_status_pod_disruptions_allowed | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE +| kube_poddisruptionbudget_status_expected_pods | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE +| kube_poddisruptionbudget_status_observed_generation | Gauge | `poddisruptionbudget`=<pdb-name>
`namespace`=<pdb-namespace> | STABLE diff --git a/Documentation/replicaset-metrics.md b/Documentation/replicaset-metrics.md index dc69376689..7a619ff901 100644 --- a/Documentation/replicaset-metrics.md +++ b/Documentation/replicaset-metrics.md @@ -1,11 +1,12 @@ # ReplicaSet metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_replicaset_status_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_status_fully_labeled_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_status_ready_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_status_observed_generation | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_spec_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_metadata_generation | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | -| kube_replicaset_created | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_replicaset_status_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_status_fully_labeled_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_status_ready_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_status_observed_generation | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_spec_replicas | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_metadata_generation | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_created | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace> | STABLE | +| kube_replicaset_owner | Gauge | `replicaset`=<replicaset-name>
`namespace`=<replicaset-namespace>
`owner_kind`=<owner kind>
`owner_name`=<owner name>
`owner_is_controller`=<whether owner is controller> | STABLE | \ No newline at end of file diff --git a/Documentation/replicationcontroller-metrics.md b/Documentation/replicationcontroller-metrics.md index aa9dc62a96..2000245c4d 100644 --- a/Documentation/replicationcontroller-metrics.md +++ b/Documentation/replicationcontroller-metrics.md @@ -1,12 +1,12 @@ # ReplicationController metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_replicationcontroller_status_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_status_fully_labeled_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_status_ready_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_status_available_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_status_observed_generation | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_spec_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_metadata_generation | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | -| kube_replicationcontroller_created | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | +| Metric name| Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_replicationcontroller_status_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_status_fully_labeled_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_status_ready_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_status_available_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_status_observed_generation | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_spec_replicas | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_metadata_generation | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | +| kube_replicationcontroller_created | Gauge | `replicationcontroller`=<replicationcontroller-name>
`namespace`=<replicationcontroller-namespace> | STABLE | diff --git a/Documentation/resourcequota-metrics.md b/Documentation/resourcequota-metrics.md index 9bc696b087..e98eb4158a 100644 --- a/Documentation/resourcequota-metrics.md +++ b/Documentation/resourcequota-metrics.md @@ -1,6 +1,6 @@ # ResourceQuota Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_resourcequota | Gauge | `resourcequota`=<quota-name>
`namespace`=<namespace>
`resource`=<ResourceName>
`type`=<quota-type> | -| kube_resourcequota_created | Gauge | `resourcequota`=<quota-name>
`namespace`=<namespace> | +| Metric name | Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_resourcequota | Gauge | `resourcequota`=<quota-name>
`namespace`=<namespace>
`resource`=<ResourceName>
`type`=<quota-type> | STABLE | +| kube_resourcequota_created | Gauge | `resourcequota`=<quota-name>
`namespace`=<namespace> | STABLE | diff --git a/Documentation/secret-metrics.md b/Documentation/secret-metrics.md index 6323126fb9..83c4a0677f 100644 --- a/Documentation/secret-metrics.md +++ b/Documentation/secret-metrics.md @@ -1,9 +1,9 @@ # Secret Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_secret_info | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace> | -| kube_secret_type | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`type`=<secret-type> | -| kube_secret_labels | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`label_SECRET_LABEL`=<SECRET_LABEL> | -| kube_secret_created | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace> | -| kube_secret_metadata_resource_version | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`resource_version`=<secret-resource-version> | +| Metric name | Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_secret_info | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace> | STABLE | +| kube_secret_type | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`type`=<secret-type> | STABLE | +| kube_secret_labels | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`label_SECRET_LABEL`=<SECRET_LABEL> | STABLE | +| kube_secret_created | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace> | STABLE | +| kube_secret_metadata_resource_version | Gauge | `secret`=<secret-name>
`namespace`=<secret-namespace>
`resource_version`=<secret-resource-version> | STABLE | diff --git a/Documentation/service-metrics.md b/Documentation/service-metrics.md index a78a91613b..822144113f 100644 --- a/Documentation/service-metrics.md +++ b/Documentation/service-metrics.md @@ -1,8 +1,10 @@ # Service Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_service_info | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`cluster_ip`=<service cluster ip> | -| kube_service_labels | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`label_SERVICE_LABEL`=<SERVICE_LABEL> | -| kube_service_created | Gauge | `service`=<service-name>
`namespace`=<service-namespace> | -| kube_service_spec_type | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`type`=<ClusterIP\|NodePort\|LoadBalancer\|ExternalName> | +| Metric name | Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_service_info | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`cluster_ip`=<service cluster ip>
`external_name`=<service external name>
`load_balancer_ip`=<service load balancer ip> | STABLE | +| kube_service_labels | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`label_SERVICE_LABEL`=<SERVICE_LABEL> | STABLE | +| kube_service_created | Gauge | `service`=<service-name>
`namespace`=<service-namespace> | STABLE | +| kube_service_spec_type | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`type`=<ClusterIP\|NodePort\|LoadBalancer\|ExternalName> | STABLE | +| kube_service_spec_external_ip | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`external_ip`=<external-ip> | STABLE | +| kube_service_status_load_balancer_ingress | Gauge | `service`=<service-name>
`namespace`=<service-namespace>
`ip`=<load-balancer-ingress-ip>
`hostname`=<load-balancer-ingress-hostname> | STABLE | diff --git a/Documentation/statefulset-metrics.md b/Documentation/statefulset-metrics.md index 3a2fafedcc..175bafe729 100644 --- a/Documentation/statefulset-metrics.md +++ b/Documentation/statefulset-metrics.md @@ -1,13 +1,15 @@ # Stateful Set Metrics -| Metric name| Metric type | Labels/tags | -| ---------- | ----------- | ----------- | -| kube_statefulset_status_replicas | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_status_replicas_current | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_status_replicas_ready | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_status_replicas_updated | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_status_observed_generation | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_replicas | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_metadata_generation | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_created | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | -| kube_statefulset_labels | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace>
`label_STATEFULSET_LABEL`=<STATEFULSET_LABEL> | +| Metric name | Metric type | Labels/tags | Status | +| ---------- | ----------- | ----------- | ----------- | +| kube_statefulset_status_replicas | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_status_replicas_current | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_status_replicas_ready | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_status_replicas_updated | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_status_observed_generation | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_replicas | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_metadata_generation | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_created | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace> | STABLE | +| kube_statefulset_labels | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace>
`label_STATEFULSET_LABEL`=<STATEFULSET_LABEL> | STABLE | +| kube_statefulset_status_current_revision | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace>
`revision`=<statefulset-current-revision> | STABLE | +| kube_statefulset_status_update_revision | Gauge | `statefulset`=<statefulset-name>
`namespace`=<statefulset-namespace>
`revision`=<statefulset-update-revision> | STABLE | diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 5d1b450abb..cd3b50f6dd 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,66 +1,64 @@ { "ImportPath": "k8s.io/kube-state-metrics", "GoVersion": "go1.9", - "GodepVersion": "v79", - "Packages": [ - "./..." - ], + "GodepVersion": "v80", "Deps": [ { - "ImportPath": "github.com/PuerkitoBio/purell", - "Comment": "v1.0.0", - "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" + "ImportPath": "cloud.google.com/go/compute/metadata", + "Comment": "v0.1.0-115-g3b1ae45", + "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" }, { - "ImportPath": "github.com/PuerkitoBio/urlesc", - "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" + "ImportPath": "cloud.google.com/go/internal", + "Comment": "v0.1.0-115-g3b1ae45", + "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" }, { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" - }, - { - "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + "ImportPath": "github.com/Azure/go-autorest/autorest", + "Comment": "v10.6.0-2-g1ff2880", + "Rev": "1ff28809256a84bb6966640ff3d0371af82ccba4" }, { - "ImportPath": "github.com/emicklei/go-restful", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" + "ImportPath": "github.com/Azure/go-autorest/autorest/adal", + "Comment": "v10.6.0-2-g1ff2880", + "Rev": "1ff28809256a84bb6966640ff3d0371af82ccba4" }, { - "ImportPath": "github.com/emicklei/go-restful/log", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" + "ImportPath": "github.com/Azure/go-autorest/autorest/azure", + "Comment": "v10.6.0-2-g1ff2880", + "Rev": "1ff28809256a84bb6966640ff3d0371af82ccba4" }, { - "ImportPath": "github.com/ghodss/yaml", - "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" + "ImportPath": "github.com/Azure/go-autorest/autorest/date", + "Comment": "v10.6.0-2-g1ff2880", + "Rev": "1ff28809256a84bb6966640ff3d0371af82ccba4" }, { - "ImportPath": "github.com/go-openapi/jsonpointer", - "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" + "ImportPath": "github.com/beorn7/perks/quantile", + "Rev": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" }, { - "ImportPath": "github.com/go-openapi/jsonreference", - "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" + "ImportPath": "github.com/davecgh/go-spew/spew", + "Comment": "v1.1.0-1-g782f496", + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, { - "ImportPath": "github.com/go-openapi/spec", - "Rev": "2433d2f0fc794728337e0c5d65716e79e163f04d" + "ImportPath": "github.com/dgrijalva/jwt-go", + "Comment": "v3.0.0-4-g01aeca5", + "Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20" }, { - "ImportPath": "github.com/go-openapi/swag", - "Rev": "0e04f5e499b19bf51031c01a00f098f25067d8dc" + "ImportPath": "github.com/ghodss/yaml", + "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" }, { "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.3-162-gc0656ed", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.3-162-gc0656ed", + "Comment": "v0.4-3-gc0656ed", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, { @@ -69,98 +67,118 @@ }, { "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "7390af9dcd3c33042ebaf2474a1724a83cf1a7e6" + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, { "ImportPath": 
"github.com/golang/protobuf/ptypes", - "Rev": "7390af9dcd3c33042ebaf2474a1724a83cf1a7e6" + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, { "ImportPath": "github.com/golang/protobuf/ptypes/any", - "Rev": "7390af9dcd3c33042ebaf2474a1724a83cf1a7e6" + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, { "ImportPath": "github.com/golang/protobuf/ptypes/duration", - "Rev": "7390af9dcd3c33042ebaf2474a1724a83cf1a7e6" + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", - "Rev": "7390af9dcd3c33042ebaf2474a1724a83cf1a7e6" + "Comment": "v1.1.0", + "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" }, { "ImportPath": "github.com/google/btree", - "Rev": "316fb6d3f031ae8f4d457c6c5186b9e3ded70435" + "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" }, { "ImportPath": "github.com/google/gofuzz", - "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" + "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" }, { "ImportPath": "github.com/googleapis/gnostic/OpenAPIv2", - "Comment": "v0.1.0", - "Rev": "ee43cbb60db7bd22502942cccbc39059117352ab" + "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, { "ImportPath": "github.com/googleapis/gnostic/compiler", - "Comment": "v0.1.0", - "Rev": "ee43cbb60db7bd22502942cccbc39059117352ab" + "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, { "ImportPath": "github.com/googleapis/gnostic/extensions", - "Comment": "v0.1.0", - "Rev": "ee43cbb60db7bd22502942cccbc39059117352ab" + "Rev": "0c5108395e2debce0d731cf0287ddf7242066aba" }, { - "ImportPath": "github.com/gregjones/httpcache", - "Rev": "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa" + "ImportPath": "github.com/gophercloud/gophercloud", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/gregjones/httpcache/diskcache", - "Rev": "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa" + "ImportPath": "github.com/gophercloud/gophercloud/openstack", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/hashicorp/golang-lru", - "Rev": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/hashicorp/golang-lru/simplelru", - "Rev": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/howeyc/gopass", - "Rev": "26c6e1184fd5255fa5f5289d0b789a4819c203a4" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/imdario/mergo", - "Comment": "0.1.3-8-g6633656", - "Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/json-iterator/go", - "Comment": "1.0.3", - "Rev": "6240e1e7983a85228f7fd9c3e1b6932d46ec58e2" + "ImportPath": "github.com/gophercloud/gophercloud/pagination", + "Rev": "781450b3c4fcb4f5182bcc5133adb4b2e4a09d1d" }, { - "ImportPath": "github.com/juju/ratelimit", - "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" + "ImportPath": "github.com/gregjones/httpcache", + "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, { - "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": 
"e978125a7e335d8f4db746a9ac5b44643f27416b" + "ImportPath": "github.com/gregjones/httpcache/diskcache", + "Rev": "787624de3eb7bd915c329cba748687a3b22666a6" }, { - "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": "e978125a7e335d8f4db746a9ac5b44643f27416b" + "ImportPath": "github.com/hashicorp/golang-lru", + "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" }, { - "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "e978125a7e335d8f4db746a9ac5b44643f27416b" + "ImportPath": "github.com/hashicorp/golang-lru/simplelru", + "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" + }, + { + "ImportPath": "github.com/imdario/mergo", + "Comment": "0.1.3-8-g6633656", + "Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc" + }, + { + "ImportPath": "github.com/json-iterator/go", + "Comment": "1.1.3-22-gf2b4162", + "Rev": "f2b4162afba35581b6d4a50d3b8f34e33c144682" }, { "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Comment": "v1.0.0-2-gc12348c", "Rev": "c12348ce28de40eed0136aa2b644d0ee0650e56c" }, + { + "ImportPath": "github.com/modern-go/concurrent", + "Comment": "1.0.3", + "Rev": "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + }, + { + "ImportPath": "github.com/modern-go/reflect2", + "Comment": "1.0.0-9-g05fbef0", + "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + }, { "ImportPath": "github.com/openshift/origin/pkg/util/proc", "Comment": "v1.3.0-alpha.0-282-g8f127d7", @@ -173,13 +191,13 @@ }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "v0.8.0-5-g8aae34f", - "Rev": "8aae34f3ffc9a93c75efeaad87bb192aa7750900" + "Comment": "v0.9.0-pre1-104-gfaf4ec3", + "Rev": "faf4ec335fe01ae5a6a0eaa34a5a9333bfbd1a30" }, { "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp", - "Comment": "v0.8.0-5-g8aae34f", - "Rev": "8aae34f3ffc9a93c75efeaad87bb192aa7750900" + "Comment": "v0.9.0-pre1-104-gfaf4ec3", + "Rev": "faf4ec335fe01ae5a6a0eaa34a5a9333bfbd1a30" }, { "ImportPath": "github.com/prometheus/client_model/go", @@ -209,79 +227,84 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Comment": "v1.0.1", + "Rev": "583c0c0531f06d5278b7d917446061adc344b5cd" }, { "ImportPath": "golang.org/x/crypto/ssh/terminal", - "Rev": "8e06e8ddd9629eb88639aba897641bff8031f1d3" + "Rev": "49796115aa4b964c318aad4f3084fdb41e9aa067" }, { "ImportPath": "golang.org/x/net/context", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, { "ImportPath": "golang.org/x/net/http2", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, { "ImportPath": "golang.org/x/net/http2/hpack", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, { "ImportPath": "golang.org/x/net/idna", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, { "ImportPath": "golang.org/x/net/lex/httplex", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" + "Rev": "1c05540f6879653db88113bc4a2b70aec4bd491f" }, { - "ImportPath": "golang.org/x/sys/unix", - "Rev": "30de6d19a3bd89a5f38ae4028e23aaa5582648af" + "ImportPath": "golang.org/x/oauth2", + "Rev": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" }, { - "ImportPath": "golang.org/x/text/cases", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": 
"golang.org/x/oauth2/google", + "Rev": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" }, { - "ImportPath": "golang.org/x/text/internal", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" }, { - "ImportPath": "golang.org/x/text/internal/tag", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" }, { - "ImportPath": "golang.org/x/text/language", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" }, { - "ImportPath": "golang.org/x/text/runes", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/sys/unix", + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, { - "ImportPath": "golang.org/x/text/secure/bidirule", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/sys/windows", + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, { - "ImportPath": "golang.org/x/text/secure/precis", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/text/secure/bidirule", + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, { "ImportPath": "golang.org/x/text/transform", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, { "ImportPath": "golang.org/x/text/unicode/bidi", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, { "ImportPath": "golang.org/x/text/unicode/norm", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, { - "ImportPath": "golang.org/x/text/width", - "Rev": "1e65e9bf72c307081cea196f47ef37aed17eb316" + "ImportPath": "golang.org/x/time/rate", + "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" }, { "ImportPath": "gopkg.in/inf.v0", @@ -290,297 +313,302 @@ }, { "ImportPath": "gopkg.in/yaml.v2", - "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" + "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" }, { "ImportPath": "k8s.io/api/admissionregistration/v1alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/admissionregistration/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/apps/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/apps/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/apps/v1beta2", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/authentication/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { 
"ImportPath": "k8s.io/api/authentication/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/authorization/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/authorization/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/autoscaling/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/autoscaling/v2beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/batch/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/batch/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/batch/v2alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/certificates/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/core/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/events/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/extensions/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/networking/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/policy/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/rbac/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/rbac/v1alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": 
"kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/rbac/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/scheduling/v1alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" + }, + { + "ImportPath": "k8s.io/api/scheduling/v1beta1", + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/settings/v1alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/storage/v1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/storage/v1alpha1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { "ImportPath": "k8s.io/api/storage/v1beta1", - "Comment": "kubernetes-1.9.1-beta.0", - "Rev": "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a" + "Comment": "kubernetes-1.11.0", + "Rev": "072894a440bdee3a891dea811fe42902311cd2a3" }, { - "ImportPath": "k8s.io/apimachinery/pkg/api/equality", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "ImportPath": "k8s.io/apiextensions-apiserver/pkg/features", + "Comment": "kubernetes-1.10.0-alpha.3-634-ga5c89ff", + "Rev": "a5c89ff2aef18eb9de1e8ef5ac81e8be76a8d716" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/meta", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/resource", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { - "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "ImportPath": 
"k8s.io/apimachinery/pkg/apis/meta/v1beta1", + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" - }, - { - "ImportPath": "k8s.io/apimachinery/pkg/conversion/unstructured", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/fields", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/labels", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/selection", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": 
"k8s.io/apimachinery/pkg/types", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/cache", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/clock", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/diff", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/errors", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/framer", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/json", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", @@ -589,18 +617,23 @@ }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/sets", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" + }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", @@ -609,33 +642,38 @@ }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": 
"kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/wait", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/version", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/pkg/watch", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" + }, + { + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", @@ -644,276 +682,857 @@ }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "Comment": "kubernetes-1.8.0-rc.1", - "Rev": "9d38e20d609d27e00d4ec18f7b9db67105a2bde0" + "Comment": "kubernetes-1.11.1-beta.0", + "Rev": "103fd098999dc9c0c88536f5c9ad2e5da39373ae" + }, + { + "ImportPath": "k8s.io/apiserver/pkg/features", + "Comment": "kubernetes-1.10.0-alpha.3-365-g0553b97", + "Rev": "0553b9748924ffb5737340cb0afa6f0de2b12c42" + }, + { + "ImportPath": "k8s.io/apiserver/pkg/util/feature", + "Comment": "kubernetes-1.10.0-alpha.3-365-g0553b97", + "Rev": "0553b9748924ffb5737340cb0afa6f0de2b12c42" }, { "ImportPath": "k8s.io/client-go/discovery", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/discovery/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/admissionregistration/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/apps/v1beta2", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/autoscaling", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": 
"k8s.io/client-go/informers/autoscaling/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/autoscaling/v2beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/batch/v2alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/certificates", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/certificates/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/core", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/core/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/events", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/events/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/extensions", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/extensions/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/internalinterfaces", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/networking", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/networking/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/policy", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/policy/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/rbac/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/scheduling", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/scheduling/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + 
"ImportPath": "k8s.io/client-go/informers/scheduling/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/settings", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/settings/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/informers/storage/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", - 
"Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": 
"7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", + "Comment": "v8.0.0", + "Rev": 
"7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/apps/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/apps/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/apps/v1beta2", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/autoscaling/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/autoscaling/v2beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/batch/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/batch/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/batch/v2alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/certificates/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/core/v1", + "Comment": 
"v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/events/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/extensions/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/networking/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/policy/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/rbac/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/rbac/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/rbac/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/scheduling/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/scheduling/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/settings/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/storage/v1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/storage/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/listers/storage/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/pkg/version", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/azure", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/exec", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/oidc", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/plugin/pkg/client/auth/openstack", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/rest", - "Comment": "v6.0.0", - 
"Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/rest/watch", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/testing", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/third_party/forked/golang/template", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/auth", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/cache", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/metrics", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/pager", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/tools/reference", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/transport", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/util/buffer", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/util/cert", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/util/homedir", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + 
"Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/client-go/util/integer", - "Comment": "v6.0.0", - "Rev": "78700dec6369ba22221b72770783300f143df150" + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { - "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "abfc5fbe1cf87ee697db107fdfd24c32fe4397a8" + "ImportPath": "k8s.io/client-go/util/jsonpath", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" + }, + { + "ImportPath": "k8s.io/client-go/util/retry", + "Comment": "v8.0.0", + "Rev": "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "abfc5fbe1cf87ee697db107fdfd24c32fe4397a8" + "Rev": "91cfa479c814065e420cee7ed227db0f63a5854e" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper", + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper", + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/features", + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.11.0", + "Rev": "91e7b4fd31fcd3d5f436da26c980becec37ceefe" } ] } diff --git a/Makefile b/Makefile index e504d26955..5b1c1c044b 100644 --- a/Makefile +++ b/Makefile @@ -7,8 +7,8 @@ ARCH ?= $(shell go env GOARCH) BuildDate = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') Commit = $(shell git rev-parse --short HEAD) ALL_ARCH = amd64 arm arm64 ppc64le s390x -PKG=k8s.io/kube-state-metrics -GO_VERSION=1.10.1 +PKG=k8s.io/kube-state-metrics/pkg +GO_VERSION=1.11.4 IMAGE = $(REGISTRY)/kube-state-metrics MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) @@ -18,8 +18,8 @@ gofmtcheck: doccheck: @echo "- Checking if documentation is up to date..." 
- @grep -hoE '(kube_[^ |]+)' Documentation/* | sort -u > documented_metrics - @sed -n 's/.*# TYPE \(kube_[^ ]\+\).*/\1/p' collectors/*_test.go | sort -u > tested_metrics + @grep -hoE '(kube_[^ |]+)' Documentation/* --exclude=README.md| sort -u > documented_metrics + @sed -n 's/.*# TYPE \(kube_[^ ]\+\).*/\1/p' pkg/collectors/*_test.go | sort -u > tested_metrics @diff -u0 tested_metrics documented_metrics || (echo "ERROR: Metrics with - are present in tests but missing in documentation, metrics with + are documented but not tested."; exit 1) @echo OK @rm -f tested_metrics documented_metrics @@ -52,24 +52,36 @@ container: .container-$(ARCH) docker run --rm -v "$$PWD":/go/src/k8s.io/kube-state-metrics -w /go/src/k8s.io/kube-state-metrics -e GOOS=linux -e GOARCH=$(ARCH) -e CGO_ENABLED=0 golang:${GO_VERSION} go build -ldflags "-s -w -X ${PKG}/version.Release=${TAG} -X ${PKG}/version.Commit=${Commit} -X ${PKG}/version.BuildDate=${BuildDate}" -o kube-state-metrics cp -r * $(TEMP_DIR) docker build -t $(MULTI_ARCH_IMG):$(TAG) $(TEMP_DIR) + docker tag $(MULTI_ARCH_IMG):$(TAG) $(MULTI_ARCH_IMG):latest ifeq ($(ARCH), amd64) # Adding check for amd64 docker tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):$(TAG) + docker tag $(MULTI_ARCH_IMG):$(TAG) $(IMAGE):latest endif +quay-push: .quay-push-$(ARCH) +.quay-push-$(ARCH): .container-$(ARCH) + docker push $(MULTI_ARCH_IMG):$(TAG) + docker push $(MULTI_ARCH_IMG):latest +ifeq ($(ARCH), amd64) + docker push $(IMAGE):$(TAG) + docker push $(IMAGE):latest +endif push: .push-$(ARCH) .push-$(ARCH): .container-$(ARCH) gcloud docker -- push $(MULTI_ARCH_IMG):$(TAG) + gcloud docker -- push $(MULTI_ARCH_IMG):latest ifeq ($(ARCH), amd64) gcloud docker -- push $(IMAGE):$(TAG) + gcloud docker -- push $(IMAGE):latest endif clean: rm -f kube-state-metrics e2e: - ./scripts/e2e.sh + ./tests/e2e.sh -.PHONY: all build all-push all-container test-unit container push clean e2e +.PHONY: all build all-push all-container test-unit container push quay-push clean e2e diff --git a/OWNERS b/OWNERS index 613fd60082..c98e309107 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,7 @@ reviewers: - brancz - andyxning + - zouyee approvers: - brancz - andyxning - - fabxc diff --git a/README.md b/README.md index 23d3c79e26..fc50f4a0dd 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,19 @@ the Metrics section below.) It is not focused on the health of the individual Kubernetes components, but rather on the health of the various objects inside, such as deployments, nodes and pods. -The metrics are exported through the [Prometheus golang -client](https://github.com/prometheus/client_golang) on the HTTP endpoint `/metrics` on -the listening port (default 80). They are served either as plaintext or -protobuf depending on the `Accept` header. They are designed to be consumed -either by Prometheus itself or by a scraper that is compatible with scraping -a Prometheus client endpoint. You can also open `/metrics` in a browser to see +kube-state-metrics is about generating metrics from Kubernetes API +objects without modification. This ensures that features provided by +kube-state-metrics have the same grade of stability as the Kubernetes API +objects themselves. In turn this means that kube-state-metrics may in certain +situations not show the exact same values as kubectl, as kubectl applies +certain heuristics to display comprehensible messages. kube-state-metrics +exposes raw data unmodified from the Kubernetes API; this way users have all the +data they require and can perform heuristics as they see fit.
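To make the kubectl contrast concrete, here is a hypothetical illustration (pod name, restart count, and label values are invented for the example): where `kubectl get pods` summarizes a failing container with a heuristic status such as `CrashLoopBackOff`, kube-state-metrics exposes the underlying API state as a raw gauge, e.g. via the `kube_pod_container_status_waiting_reason` metric:

```
# what kubectl might display (heuristic summary)
NAME          READY   STATUS             RESTARTS   AGE
example-pod   0/1     CrashLoopBackOff   12         1h

# the same state as raw kube-state-metrics output
kube_pod_container_status_waiting_reason{namespace="default",pod="example-pod",container="app",reason="CrashLoopBackOff"} 1
```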
+ +The metrics are exported on the HTTP endpoint `/metrics` on the listening port +(default 80). They are served as plaintext. They are designed to be consumed +either by Prometheus itself or by a scraper that is compatible with scraping a +Prometheus client endpoint. You can also open `/metrics` in a browser to see the raw metrics. ## Table of Contents @@ -26,12 +33,12 @@ the raw metrics. - [Metrics Documentation](#metrics-documentation) - [Kube-state-metrics self metrics](#kube-state-metrics-self-metrics) - [Resource recommendation](#resource-recommendation) -- [kube-state-metrics vs. Heaspter](#kube-state-metrics-vs-heapster) +- [kube-state-metrics vs. metrics-server(Heapster)](#kube-state-metrics-vs-metrics-serverheapster) - [Setup](#setup) - [Building the Docker container](#building-the-docker-container) - [Usage](#usage) - [Kubernetes Deployment](#kubernetes-deployment) - - [Deployment](#deployment) + - [Development](#development) ### Versioning @@ -46,14 +53,15 @@ All additional compatibility is only best effort, or happens to still/already be #### Compatibility matrix At most 5 kube-state-metrics releases will be recorded below. -| kube-state-metrics | client-go | **Kubernetes 1.4** | **Kubernetes 1.5** | **Kubernetes 1.6** | **Kubernetes 1.7** | **Kubernetes 1.8** | **Kubernetes 1.9** | -|--------------------|-----------|---------------------|--------------------|--------------------|--------------------|--------------------|--------------------| -| **v0.5.0** | v2.0.0-alpha.1 | ✓ | ✓ | - | - | - | - | -| **v1.0.x** | 4.0.0-beta.0 | ✓ | ✓ | ✓ | ✓ | - | - | -| **v1.1.0** | release-5.0 | ✓ | ✓ | ✓ | ✓ | ✓ | - | -| **v1.2.0** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | -| **v1.3.0** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | -| **master** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | +| kube-state-metrics | client-go | **Kubernetes 1.9** | **Kubernetes 1.10** | **Kubernetes 1.11** | **Kubernetes 1.12** | +|--------------------|-----------|--------------------|--------------------|--------------------|--------------------| +| **v1.1.0** | release-5.0 | ✓ | ✓ | - | - | +| **v1.2.0** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | +| **v1.3.0** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | +| **v1.3.1** | v6.0.0 | ✓ | ✓ | ✓ | ✓ | +| **v1.4.0** | v8.0.0 | ✓ | ✓ | ✓ | ✓ | +| **v1.5.0** | v8.0.0 | ✓ | ✓ | ✓ | ✓ | +| **master** | v8.0.0 | ✓ | ✓ | ✓ | ✓ | - `✓` Fully supported version range. - `-` The Kubernetes cluster has features the client-go library can't use (additional API objects, etc). @@ -65,8 +73,8 @@ release. #### Container Image The latest container image can be found at: -* `quay.io/coreos/kube-state-metrics:v1.3.0` -* `k8s.gcr.io/kube-state-metrics:v1.3.0` +* `quay.io/coreos/kube-state-metrics:v1.5.0` +* `k8s.gcr.io/kube-state-metrics:v1.5.0` **Note**: The recommended docker registry for kube-state-metrics is `quay.io`. kube-state-metrics on @@ -87,24 +95,26 @@ additional metrics! > * kube_node_status_capacity_nvidia_gpu_cards > * kube_node_status_allocatable_nvidia_gpu_cards > -> are in alpha stage and will be deprecated when the kubernetes accelerator feature support is deprecated in version v1.11. +> are removed in kube-state-metrics v1.4.0. > > Any collectors and metrics based on alpha Kubernetes APIs are excluded from any stability guarantee, > which may be changed at any given release. -See the [`Documentation`](Documentation) directory for documentation of the exposed metrics. +See the [`Documentation`](Documentation) directory for more information on the exposed metrics.
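As a quick sketch of what that plaintext output looks like (assuming a locally running instance listening on port 8080 as in the development setup below; the pod name and values are illustrative), the endpoint can be inspected with curl or a browser:

```
$ curl -s http://localhost:8080/metrics | grep kube_pod_status_phase
# HELP kube_pod_status_phase The pods current phase.
# TYPE kube_pod_status_phase gauge
kube_pod_status_phase{namespace="default",pod="example-pod",phase="Running"} 1
kube_pod_status_phase{namespace="default",pod="example-pod",phase="Pending"} 0
```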
### Kube-state-metrics self metrics -kube-state-metrics exposes its own metrics under `--telemetry-host` and `--telemetry-port` (default 81). - -| Metric name | Metric type | Description | Labels/tags | -| ----------- | ----------- | ----------- | ----------- | -| ksm_scrape_error_total | Counter | Total scrape errors encountered when scraping a resource | `resource`=<resource name> | -| ksm_resources_per_scrape | Summary | Number of resources returned per scrape | `resource`=<resource name> | +kube-state-metrics exposes its own general process metrics under `--telemetry-host` and `--telemetry-port` (default 81). ### Resource recommendation -Resource usage changes with the size of the cluster. As a general rule, you should allocate +Resource usage for kube-state-metrics changes with the number of Kubernetes objects (Pods/Nodes/Deployments/Secrets, etc.) in the cluster. +To some extent, the number of Kubernetes objects in a cluster is proportional to the number of nodes in the cluster. +[addon-resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer) +can watch and automatically vertically scale the dependent container up and down based on the number of nodes. +Thus kube-state-metrics uses `addon-resizer` to automatically scale its resource request. For detailed usage of +`addon-resizer`, see its [ReadMe](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer#nanny-program-and-arguments). + +As a general rule, you should allocate * 200MiB memory * 0.1 cores @@ -116,20 +126,22 @@ For clusters of more than 100 nodes, allocate at least These numbers are based on [scalability tests](https://github.com/kubernetes/kube-state-metrics/issues/124#issuecomment-318394185) at 30 pods per node. -### kube-state-metrics vs. Heapster +Note that if CPU limits are set too low, kube-state-metrics' internal queues cannot be worked off quickly enough, resulting in increased memory consumption as the queue length grows. If you experience problems resulting from high memory allocation, try increasing the CPU limits. -[Heapster](https://github.com/kubernetes/heapster) is a project which fetches +### kube-state-metrics vs. metrics-server(Heapster) + +[metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster) is a project which fetches metrics (such as CPU and memory utilization) from the Kubernetes API server and nodes and sends them to various time-series backends such as InfluxDB or Google Cloud Monitoring. Its most important function right now is implementing certain metric APIs that Kubernetes components like the horizontal pod auto-scaler query to make decisions. -While Heapster's focus is on forwarding metrics already generated by +While [metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster)'s focus is on forwarding metrics already generated by Kubernetes, kube-state-metrics is focused on generating completely new metrics from Kubernetes' object state (e.g. metrics based on deployments, replica sets, -etc.).
The reason not to extend [metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster) with kube-state-metrics' abilities is +that the concerns are fundamentally different: [metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster) only needs to fetch, format and forward metrics that already exist, in particular from Kubernetes components, and write them into sinks, which are the actual monitoring systems. kube-state-metrics, in contrast, holds an entire snapshot of @@ -137,11 +149,11 @@ Kubernetes state in memory and continuously generates new metrics based off of it but has no responsibility for exporting its metrics anywhere. In other words, kube-state-metrics itself is designed to be another source for -Heapster (although this is not currently the case). +[metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster) (although this is not currently the case). -Additionally, some monitoring systems such as Prometheus do not use Heapster +Additionally, some monitoring systems such as Prometheus do not use [metrics-server](https://github.com/kubernetes-incubator/metrics-server)(Heapster) for metric collection at all and instead implement their own, but -[Prometheus can scrape metrics from heapster itself to alert on Heapster's health](https://github.com/kubernetes/heapster/blob/master/docs/debugging.md#debuging). +[Prometheus can scrape metrics from metrics-server(Heapster) itself to alert on metrics-server(Heapster)'s health](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/). Having kube-state-metrics as a separate project enables access to these metrics from those monitoring systems. @@ -180,6 +192,8 @@ metrics right away. kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud info | grep Account | cut -d '[' -f 2 | cut -d ']' -f 1) ``` +Note that your GCP identity is case sensitive but `gcloud info` as of Google Cloud SDK 221.0.0 is not. This means that if your IAM member contains capital letters, the above one-liner may not work for you. If you have 403 forbidden responses after running the above command and `kubectl apply -f kubernetes`, check the IAM member associated with your account at https://console.cloud.google.com/iam-admin/iam?project=PROJECT_ID. If it contains capital letters, you may need to set the `--user` flag in the command above to the case-sensitive role listed at https://console.cloud.google.com/iam-admin/iam?project=PROJECT_ID. + After running the above, if you see `Clusterrolebinding "cluster-admin-binding" created`, then you are able to continue with the setup of this service. #### Development @@ -190,7 +204,7 @@ running: > Users can override the apiserver address in the KUBE-CONFIG file with the `--apiserver` command line flag.
go install - kube-state-metrics --in-cluster=false --port=8080 --telemetry-port=8081 --kubeconfig= + kube-state-metrics --port=8080 --telemetry-port=8081 --kubeconfig= --apiserver= Then curl the metrics endpoint diff --git a/RELEASE.md b/RELEASE.md index a5b450a8a1..47c3d1b4ef 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -15,5 +15,5 @@ * kube-state-metrics image tag used in Kubernetes deployment yaml config * cut the new release branch, i.e., `release-1.2`, or merge/cherry-pick changes onto the minor release branch you intend to tag the release on * cut the new release tag, i.e., `v1.2.0-rc.0` -* ping Googlers(@loburm/@piosz) to build and push newest image to `staging-k8s.gcr.io` +* ping Googlers (@loburm/@piosz) to build and push the newest image to `k8s.gcr.io` (or to `staging-k8s.gcr.io` in case of release candidates) * build and push newest image to `quay.io`(@brancz) diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS new file mode 100644 index 0000000000..f660eb7331 --- /dev/null +++ b/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +brancz +andyxning diff --git a/collectors/collectors.go b/collectors/collectors.go deleted file mode 100644 index 875b2189f8..0000000000 --- a/collectors/collectors.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package collectors - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -var ( - resyncPeriod = 5 * time.Minute - - ScrapeErrorTotalMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "ksm_scrape_error_total", - Help: "Total scrape errors encountered when scraping a resource", - }, - []string{"resource"}, - ) - - ResourcesPerScrapeMetric = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "ksm_resources_per_scrape", - Help: "Number of resources returned per scrape", - }, - []string{"resource"}, - ) -) - -type SharedInformerList []cache.SharedInformer - -func NewSharedInformerList(client rest.Interface, resource string, namespaces []string, objType runtime.Object) *SharedInformerList { - sinfs := SharedInformerList{} - for _, namespace := range namespaces { - slw := cache.NewListWatchFromClient(client, resource, namespace, fields.Everything()) - sinfs = append(sinfs, cache.NewSharedInformer(slw, objType, resyncPeriod)) - } - return &sinfs -} - -func (sil SharedInformerList) Run(stopCh <-chan struct{}) { - for _, sinf := range sil { - go sinf.Run(stopCh) - } -} diff --git a/collectors/configmap.go b/collectors/configmap.go deleted file mode 100644 index 4c47850b66..0000000000 --- a/collectors/configmap.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descConfigMapInfo = prometheus.NewDesc( - "kube_configmap_info", - "Information about configmap.", - []string{"namespace", "configmap"}, nil, - ) - - descConfigMapCreated = prometheus.NewDesc( - "kube_configmap_created", - "Unix creation timestamp", - []string{"namespace", "configmap"}, nil, - ) - - descConfigMapMetadataResourceVersion = prometheus.NewDesc( - "kube_configmap_metadata_resource_version", - "Resource version representing a specific version of the configmap.", - []string{"namespace", "configmap", "resource_version"}, nil, - ) -) - -type ConfigMapLister func() ([]v1.ConfigMap, error) - -func (l ConfigMapLister) List() ([]v1.ConfigMap, error) { - return l() -} - -func RegisterConfigMapCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect configmap with %s", client.APIVersion()) - - cminfs := NewSharedInformerList(client, "configmaps", namespaces, &v1.ConfigMap{}) - - configMapLister := ConfigMapLister(func() (configMaps []v1.ConfigMap, err error) { - for _, cminf := range *cminfs { - for _, m := range cminf.GetStore().List() { - configMaps = append(configMaps, *m.(*v1.ConfigMap)) - } - } - return configMaps, nil - }) - - registry.MustRegister(&configMapCollector{store: configMapLister}) - cminfs.Run(context.Background().Done()) -} - -type configMapStore interface { - List() (configMaps []v1.ConfigMap, err error) -} - -// configMapCollector collects metrics about all configMaps in the cluster. -type configMapCollector struct { - store configMapStore -} - -// Describe implements the prometheus.Collector interface. -func (sc *configMapCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descConfigMapInfo - ch <- descConfigMapCreated - ch <- descConfigMapMetadataResourceVersion -} - -// Collect implements the prometheus.Collector interface. -func (cmc *configMapCollector) Collect(ch chan<- prometheus.Metric) { - configMaps, err := cmc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "configmap"}).Inc() - glog.Errorf("listing configmaps failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "configmap"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "configmap"}).Observe(float64(len(configMaps))) - for _, s := range configMaps { - cmc.collectConfigMap(ch, s) - } - - glog.V(4).Infof("collected %d configmaps", len(configMaps)) -} - -func (cmc *configMapCollector) collectConfigMap(ch chan<- prometheus.Metric, s v1.ConfigMap) { - addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { - lv = append([]string{s.Namespace, s.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, t, v, lv...) - } - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.GaugeValue, v, lv...) 
- } - addGauge(descConfigMapInfo, 1) - - if !s.CreationTimestamp.IsZero() { - addGauge(descConfigMapCreated, float64(s.CreationTimestamp.Unix())) - } - - addGauge(descConfigMapMetadataResourceVersion, 1, string(s.ObjectMeta.ResourceVersion)) -} diff --git a/collectors/configmap_test.go b/collectors/configmap_test.go deleted file mode 100644 index 021bd19a96..0000000000 --- a/collectors/configmap_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockConfigMapStore struct { - f func() ([]v1.ConfigMap, error) -} - -func (ds mockConfigMapStore) List() (configMaps []v1.ConfigMap, err error) { - return ds.f() -} - -func TestConfigMapCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - - startTime := 1501569018 - metav1StartTime := metav1.Unix(int64(startTime), 0) - - const metadata = ` - # HELP kube_configmap_info Information about configmap. - # TYPE kube_configmap_info gauge - # HELP kube_configmap_created Unix creation timestamp - # TYPE kube_configmap_created gauge - # HELP kube_configmap_metadata_resource_version Resource version representing a specific version of the configmap. - # TYPE kube_configmap_metadata_resource_version gauge - ` - cases := []struct { - configMaps []v1.ConfigMap - metrics []string - want string - }{ - { - configMaps: []v1.ConfigMap{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "configmap1", - Namespace: "ns1", - ResourceVersion: "123456", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "configmap2", - Namespace: "ns2", - CreationTimestamp: metav1StartTime, - ResourceVersion: "abcdef", - }, - }, - }, - want: metadata + ` - kube_configmap_info{configmap="configmap1",namespace="ns1"} 1 - kube_configmap_info{configmap="configmap2",namespace="ns2"} 1 - kube_configmap_created{configmap="configmap2",namespace="ns2"} 1.501569018e+09 - kube_configmap_metadata_resource_version{configmap="configmap1",namespace="ns1",resource_version="123456"} 1 - kube_configmap_metadata_resource_version{configmap="configmap2",namespace="ns2",resource_version="abcdef"} 1 - `, - metrics: []string{"kube_configmap_info", "kube_configmap_created", "kube_configmap_metadata_resource_version"}, - }, - } - for _, c := range cases { - cmc := &configMapCollector{ - store: mockConfigMapStore{ - f: func() ([]v1.ConfigMap, error) { return c.configMaps, nil }, - }, - } - if err := gatherAndCompare(cmc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/cronjob.go b/collectors/cronjob.go deleted file mode 100644 index 45befedc19..0000000000 --- a/collectors/cronjob.go +++ /dev/null @@ -1,202 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "fmt" - "time" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "github.com/robfig/cron" - "golang.org/x/net/context" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - - batchv1beta1 "k8s.io/api/batch/v1beta1" -) - -var ( - descCronJobLabelsName = "kube_cronjob_labels" - descCronJobLabelsHelp = "Kubernetes labels converted to Prometheus labels." - descCronJobLabelsDefaultLabels = []string{"namespace", "cronjob"} - - descCronJobLabels = prometheus.NewDesc( - descCronJobLabelsName, - descCronJobLabelsHelp, - descCronJobLabelsDefaultLabels, nil, - ) - - descCronJobInfo = prometheus.NewDesc( - "kube_cronjob_info", - "Info about cronjob.", - []string{"namespace", "cronjob", "schedule", "concurrency_policy"}, nil, - ) - descCronJobCreated = prometheus.NewDesc( - "kube_cronjob_created", - "Unix creation timestamp", - []string{"namespace", "cronjob"}, nil, - ) - descCronJobStatusActive = prometheus.NewDesc( - "kube_cronjob_status_active", - "Active holds pointers to currently running jobs.", - []string{"namespace", "cronjob"}, nil, - ) - descCronJobStatusLastScheduleTime = prometheus.NewDesc( - "kube_cronjob_status_last_schedule_time", - "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", - []string{"namespace", "cronjob"}, nil, - ) - descCronJobSpecSuspend = prometheus.NewDesc( - "kube_cronjob_spec_suspend", - "Suspend flag tells the controller to suspend subsequent executions.", - []string{"namespace", "cronjob"}, nil, - ) - descCronJobSpecStartingDeadlineSeconds = prometheus.NewDesc( - "kube_cronjob_spec_starting_deadline_seconds", - "Deadline in seconds for starting the job if it misses scheduled time for any reason.", - []string{"namespace", "cronjob"}, nil, - ) - descCronJobNextScheduledTime = prometheus.NewDesc( - "kube_cronjob_next_schedule_time", - "Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. 
Use this to determine if the job is delayed.", - []string{"namespace", "cronjob"}, nil, - ) -) - -type CronJobLister func() ([]batchv1beta1.CronJob, error) - -func (l CronJobLister) List() ([]batchv1beta1.CronJob, error) { - return l() -} - -func RegisterCronJobCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.BatchV1beta1().RESTClient() - glog.Infof("collect cronjob with %s", client.APIVersion()) - - cjinfs := NewSharedInformerList(client, "cronjobs", namespaces, &batchv1beta1.CronJob{}) - - cronJobLister := CronJobLister(func() (cronjobs []batchv1beta1.CronJob, err error) { - for _, cjinf := range *cjinfs { - for _, c := range cjinf.GetStore().List() { - cronjobs = append(cronjobs, *(c.(*batchv1beta1.CronJob))) - } - } - return cronjobs, nil - }) - - registry.MustRegister(&cronJobCollector{store: cronJobLister}) - cjinfs.Run(context.Background().Done()) -} - -type cronJobStore interface { - List() (cronjobs []batchv1beta1.CronJob, err error) -} - -// cronJobCollector collects metrics about all cronjobs in the cluster. -type cronJobCollector struct { - store cronJobStore -} - -// Describe implements the prometheus.Collector interface. -func (dc *cronJobCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descCronJobInfo - ch <- descCronJobCreated - ch <- descCronJobLabels - ch <- descCronJobStatusActive - ch <- descCronJobStatusLastScheduleTime - ch <- descCronJobSpecSuspend - ch <- descCronJobSpecStartingDeadlineSeconds - ch <- descCronJobNextScheduledTime -} - -// Collect implements the prometheus.Collector interface. -func (cjc *cronJobCollector) Collect(ch chan<- prometheus.Metric) { - cronjobs, err := cjc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "cronjob"}).Inc() - glog.Errorf("listing cronjobs failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "cronjob"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "cronjob"}).Observe(float64(len(cronjobs))) - for _, cj := range cronjobs { - cjc.collectCronJob(ch, cj) - } - - glog.V(4).Infof("collected %d cronjobs", len(cronjobs)) -} - -func getNextScheduledTime(schedule string, lastScheduleTime *metav1.Time, createdTime metav1.Time) (time.Time, error) { - sched, err := cron.ParseStandard(schedule) - if err != nil { - return time.Time{}, fmt.Errorf("Failed to parse cron job schedule '%s': %s", schedule, err) - } - if !lastScheduleTime.IsZero() { - return sched.Next((*lastScheduleTime).Time), nil - } - if !createdTime.IsZero() { - return sched.Next(createdTime.Time), nil - } - return time.Time{}, fmt.Errorf("Created time and lastScheduleTime are both zero") -} - -func cronJobLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descCronJobLabelsName, - descCronJobLabelsHelp, - append(descCronJobLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (jc *cronJobCollector) collectCronJob(ch chan<- prometheus.Metric, j batchv1beta1.CronJob) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{j.Namespace, j.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) 
- } - - if j.Spec.StartingDeadlineSeconds != nil { - addGauge(descCronJobSpecStartingDeadlineSeconds, float64(*j.Spec.StartingDeadlineSeconds)) - } - - // If the cron job is suspended, don't track the next scheduled time - nextScheduledTime, err := getNextScheduledTime(j.Spec.Schedule, j.Status.LastScheduleTime, j.CreationTimestamp) - if err != nil { - glog.Errorf("%s", err) - } else if !*j.Spec.Suspend { - addGauge(descCronJobNextScheduledTime, float64(nextScheduledTime.Unix())) - } - - addGauge(descCronJobInfo, 1, j.Spec.Schedule, string(j.Spec.ConcurrencyPolicy)) - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(j.Labels) - addGauge(cronJobLabelsDesc(labelKeys), 1, labelValues...) - - if !j.CreationTimestamp.IsZero() { - addGauge(descCronJobCreated, float64(j.CreationTimestamp.Unix())) - } - addGauge(descCronJobStatusActive, float64(len(j.Status.Active))) - if j.Spec.Suspend != nil { - addGauge(descCronJobSpecSuspend, boolFloat64(*j.Spec.Suspend)) - } - - if j.Status.LastScheduleTime != nil { - addGauge(descCronJobStatusLastScheduleTime, float64(j.Status.LastScheduleTime.Unix())) - } -} diff --git a/collectors/cronjob_test.go b/collectors/cronjob_test.go deleted file mode 100644 index 230ee80fc0..0000000000 --- a/collectors/cronjob_test.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "fmt" - "math" - "testing" - "time" - - batchv1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - SuspendTrue = true - SuspendFalse = false - StartingDeadlineSeconds300 int64 = 300 - - // "1520742896" is "2018/3/11 12:34:56" in "Asia/Shanghai". - ActiveRunningCronJob1LastScheduleTime = time.Unix(1520742896, 0) - SuspendedCronJob1LastScheduleTime = time.Unix(1520742896+5.5*3600, 0) // 5.5 hours later - ActiveCronJob1NoLastScheduledCreationTimestamp = time.Unix(1520742896+6.5*3600, 0) -) - -type mockCronJobStore struct { - f func() ([]batchv1beta1.CronJob, error) -} - -func (cjs mockCronJobStore) List() (cronJobs []batchv1beta1.CronJob, err error) { - return cjs.f() -} - -func TestCronJobCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. 
- - hour := ActiveRunningCronJob1LastScheduleTime.Hour() - ActiveRunningCronJob1NextScheduleTime := time.Time{} - switch { - case hour < 6: - ActiveRunningCronJob1NextScheduleTime = time.Date( - ActiveRunningCronJob1LastScheduleTime.Year(), - ActiveRunningCronJob1LastScheduleTime.Month(), - ActiveRunningCronJob1LastScheduleTime.Day(), - 6, - 0, - 0, 0, time.Local) - case hour < 12: - ActiveRunningCronJob1NextScheduleTime = time.Date( - ActiveRunningCronJob1LastScheduleTime.Year(), - ActiveRunningCronJob1LastScheduleTime.Month(), - ActiveRunningCronJob1LastScheduleTime.Day(), - 12, - 0, - 0, 0, time.Local) - case hour < 18: - ActiveRunningCronJob1NextScheduleTime = time.Date( - ActiveRunningCronJob1LastScheduleTime.Year(), - ActiveRunningCronJob1LastScheduleTime.Month(), - ActiveRunningCronJob1LastScheduleTime.Day(), - 18, - 0, - 0, 0, time.Local) - case hour < 24: - ActiveRunningCronJob1NextScheduleTime = time.Date( - ActiveRunningCronJob1LastScheduleTime.Year(), - ActiveRunningCronJob1LastScheduleTime.Month(), - ActiveRunningCronJob1LastScheduleTime.Day(), - 24, - 0, - 0, 0, time.Local) - } - - minute := ActiveCronJob1NoLastScheduledCreationTimestamp.Minute() - ActiveCronJob1NoLastScheduledNextScheduleTime := time.Time{} - switch { - case minute < 25: - ActiveCronJob1NoLastScheduledNextScheduleTime = time.Date( - ActiveCronJob1NoLastScheduledCreationTimestamp.Year(), - ActiveCronJob1NoLastScheduledCreationTimestamp.Month(), - ActiveCronJob1NoLastScheduledCreationTimestamp.Day(), - ActiveCronJob1NoLastScheduledCreationTimestamp.Hour(), - 25, - 0, 0, time.Local) - default: - ActiveCronJob1NoLastScheduledNextScheduleTime = time.Date( - ActiveCronJob1NoLastScheduledNextScheduleTime.Year(), - ActiveCronJob1NoLastScheduledNextScheduleTime.Month(), - ActiveCronJob1NoLastScheduledNextScheduleTime.Day(), - ActiveCronJob1NoLastScheduledNextScheduleTime.Hour()+1, - 25, - 0, 0, time.Local) - } - - const metadata = ` - # HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_cronjob_labels gauge - # HELP kube_cronjob_info Info about cronjob. - # TYPE kube_cronjob_info gauge - # HELP kube_cronjob_created Unix creation timestamp - # TYPE kube_cronjob_created gauge - # HELP kube_cronjob_spec_starting_deadline_seconds Deadline in seconds for starting the job if it misses scheduled time for any reason. - # TYPE kube_cronjob_spec_starting_deadline_seconds gauge - # HELP kube_cronjob_spec_suspend Suspend flag tells the controller to suspend subsequent executions. - # TYPE kube_cronjob_spec_suspend gauge - # HELP kube_cronjob_status_active Active holds pointers to currently running jobs. - # TYPE kube_cronjob_status_active gauge - # HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled. - # TYPE kube_cronjob_status_last_schedule_time gauge - # HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed. 
- # TYPE kube_cronjob_next_schedule_time gauge - ` - cases := []struct { - cronJobs []batchv1beta1.CronJob - want string - }{ - { - cronJobs: []batchv1beta1.CronJob{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "ActiveRunningCronJob1", - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-active-running-1", - }, - }, - Status: batchv1beta1.CronJobStatus{ - Active: []v1.ObjectReference{{Name: "FakeJob1"}, {Name: "FakeJob2"}}, - LastScheduleTime: &metav1.Time{Time: ActiveRunningCronJob1LastScheduleTime}, - }, - Spec: batchv1beta1.CronJobSpec{ - StartingDeadlineSeconds: &StartingDeadlineSeconds300, - ConcurrencyPolicy: "Forbid", - Suspend: &SuspendFalse, - Schedule: "0 */6 * * *", - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "SuspendedCronJob1", - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-suspended-1", - }, - }, - Status: batchv1beta1.CronJobStatus{ - Active: []v1.ObjectReference{}, - LastScheduleTime: &metav1.Time{Time: SuspendedCronJob1LastScheduleTime}, - }, - Spec: batchv1beta1.CronJobSpec{ - StartingDeadlineSeconds: &StartingDeadlineSeconds300, - ConcurrencyPolicy: "Forbid", - Suspend: &SuspendTrue, - Schedule: "0 */3 * * *", - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "ActiveCronJob1NoLastScheduled", - CreationTimestamp: metav1.Time{Time: ActiveCronJob1NoLastScheduledCreationTimestamp}, - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-active-no-last-scheduled-1", - }, - }, - Status: batchv1beta1.CronJobStatus{ - Active: []v1.ObjectReference{}, - LastScheduleTime: nil, - }, - Spec: batchv1beta1.CronJobSpec{ - StartingDeadlineSeconds: &StartingDeadlineSeconds300, - ConcurrencyPolicy: "Forbid", - Suspend: &SuspendFalse, - Schedule: "25 * * * *", - }, - }, - }, - want: metadata + ` - kube_cronjob_created{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 1.520766296e+09 - - kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveRunningCronJob1",namespace="ns1",schedule="0 */6 * * *"} 1 - kube_cronjob_info{concurrency_policy="Forbid",cronjob="SuspendedCronJob1",namespace="ns1",schedule="0 */3 * * *"} 1 - kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1",schedule="25 * * * *"} 1 - - kube_cronjob_labels{cronjob="ActiveCronJob1NoLastScheduled",label_app="example-active-no-last-scheduled-1",namespace="ns1"} 1 - kube_cronjob_labels{cronjob="ActiveRunningCronJob1",label_app="example-active-running-1",namespace="ns1"} 1 - kube_cronjob_labels{cronjob="SuspendedCronJob1",label_app="example-suspended-1",namespace="ns1"} 1 - ` + - fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveCronJob1NoLastScheduled\",namespace=\"ns1\"} %ve+09\n", - float64(ActiveCronJob1NoLastScheduledNextScheduleTime.Unix())/math.Pow10(9)) + - fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveRunningCronJob1\",namespace=\"ns1\"} %ve+09\n", - float64(ActiveRunningCronJob1NextScheduleTime.Unix())/math.Pow10(9)) + - ` - kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 300 - kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveRunningCronJob1",namespace="ns1"} 300 - kube_cronjob_spec_starting_deadline_seconds{cronjob="SuspendedCronJob1",namespace="ns1"} 300 - - kube_cronjob_spec_suspend{cronjob="ActiveRunningCronJob1",namespace="ns1"} 0 - kube_cronjob_spec_suspend{cronjob="SuspendedCronJob1",namespace="ns1"} 1 - 
kube_cronjob_spec_suspend{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 0 - - kube_cronjob_status_active{cronjob="ActiveRunningCronJob1",namespace="ns1"} 2 - kube_cronjob_status_active{cronjob="SuspendedCronJob1",namespace="ns1"} 0 - kube_cronjob_status_active{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 0 - - kube_cronjob_status_last_schedule_time{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1.520742896e+09 - kube_cronjob_status_last_schedule_time{cronjob="SuspendedCronJob1",namespace="ns1"} 1.520762696e+09 - `, - }, - } - for _, c := range cases { - cjc := &cronJobCollector{ - store: mockCronJobStore{ - f: func() ([]batchv1beta1.CronJob, error) { return c.cronJobs, nil }, - }, - } - if err := gatherAndCompare(cjc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/daemonset.go b/collectors/daemonset.go deleted file mode 100644 index 765a27e0d4..0000000000 --- a/collectors/daemonset.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/extensions/v1beta1" - "k8s.io/client-go/kubernetes" -) - -var ( - descDaemonSetLabelsName = "kube_daemonset_labels" - descDaemonSetLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descDaemonSetLabelsDefaultLabels = []string{"namespace", "daemonset"} - - descDaemonSetCreated = prometheus.NewDesc( - "kube_daemonset_created", - "Unix creation timestamp", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetCurrentNumberScheduled = prometheus.NewDesc( - "kube_daemonset_status_current_number_scheduled", - "The number of nodes running at least one daemon pod and are supposed to.", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetDesiredNumberScheduled = prometheus.NewDesc( - "kube_daemonset_status_desired_number_scheduled", - "The number of nodes that should be running the daemon pod.", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetNumberAvailable = prometheus.NewDesc( - "kube_daemonset_status_number_available", - "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetNumberMisscheduled = prometheus.NewDesc( - "kube_daemonset_status_number_misscheduled", - "The number of nodes running a daemon pod but are not supposed to.", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetNumberReady = prometheus.NewDesc( - "kube_daemonset_status_number_ready", - "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetNumberUnavailable = prometheus.NewDesc( - "kube_daemonset_status_number_unavailable", - "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetUpdatedNumberScheduled = prometheus.NewDesc( - "kube_daemonset_updated_number_scheduled", - "The total number of nodes that are running updated daemon pod", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetMetadataGeneration = prometheus.NewDesc( - "kube_daemonset_metadata_generation", - "Sequence number representing a specific generation of the desired state.", - []string{"namespace", "daemonset"}, nil, - ) - descDaemonSetLabels = prometheus.NewDesc( - descDaemonSetLabelsName, - descDaemonSetLabelsHelp, - descDaemonSetLabelsDefaultLabels, nil, - ) -) - -type DaemonSetLister func() ([]v1beta1.DaemonSet, error) - -func (l DaemonSetLister) List() ([]v1beta1.DaemonSet, error) { - return l() -} - -func RegisterDaemonSetCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.ExtensionsV1beta1().RESTClient() - glog.Infof("collect daemonset with %s", client.APIVersion()) - - dsinfs := NewSharedInformerList(client, "daemonsets", namespaces, &v1beta1.DaemonSet{}) - - dsLister := DaemonSetLister(func() (daemonsets []v1beta1.DaemonSet, err error) { - for _, dsinf := range *dsinfs { - for _, c := range dsinf.GetStore().List() { - daemonsets = append(daemonsets, *(c.(*v1beta1.DaemonSet))) - } - } - return daemonsets, nil - }) - - registry.MustRegister(&daemonsetCollector{store: dsLister}) - dsinfs.Run(context.Background().Done()) -} - -type daemonsetStore interface { - List() (daemonsets []v1beta1.DaemonSet, err error) -} - -// daemonsetCollector collects metrics about all daemonsets in the cluster. -type daemonsetCollector struct { - store daemonsetStore -} - -// Describe implements the prometheus.Collector interface. 
-func (dc *daemonsetCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descDaemonSetCreated - ch <- descDaemonSetCurrentNumberScheduled - ch <- descDaemonSetNumberAvailable - ch <- descDaemonSetNumberMisscheduled - ch <- descDaemonSetNumberUnavailable - ch <- descDaemonSetDesiredNumberScheduled - ch <- descDaemonSetNumberReady - ch <- descDaemonSetUpdatedNumberScheduled - ch <- descDaemonSetMetadataGeneration - ch <- descDaemonSetLabels -} - -// Collect implements the prometheus.Collector interface. -func (dc *daemonsetCollector) Collect(ch chan<- prometheus.Metric) { - dss, err := dc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "daemonset"}).Inc() - glog.Errorf("listing daemonsets failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "daemonset"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "daemonset"}).Observe(float64(len(dss))) - for _, d := range dss { - dc.collectDaemonSet(ch, d) - } - - glog.V(4).Infof("collected %d daemonsets", len(dss)) -} - -func DaemonSetLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descDaemonSetLabelsName, - descDaemonSetLabelsHelp, - append(descDaemonSetLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (dc *daemonsetCollector) collectDaemonSet(ch chan<- prometheus.Metric, d v1beta1.DaemonSet) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{d.Namespace, d.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - if !d.CreationTimestamp.IsZero() { - addGauge(descDaemonSetCreated, float64(d.CreationTimestamp.Unix())) - } - addGauge(descDaemonSetCurrentNumberScheduled, float64(d.Status.CurrentNumberScheduled)) - addGauge(descDaemonSetNumberAvailable, float64(d.Status.NumberAvailable)) - addGauge(descDaemonSetNumberUnavailable, float64(d.Status.NumberUnavailable)) - addGauge(descDaemonSetNumberMisscheduled, float64(d.Status.NumberMisscheduled)) - addGauge(descDaemonSetDesiredNumberScheduled, float64(d.Status.DesiredNumberScheduled)) - addGauge(descDaemonSetNumberReady, float64(d.Status.NumberReady)) - addGauge(descDaemonSetUpdatedNumberScheduled, float64(d.Status.UpdatedNumberScheduled)) - addGauge(descDaemonSetMetadataGeneration, float64(d.ObjectMeta.Generation)) - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(d.ObjectMeta.Labels) - addGauge(DaemonSetLabelsDesc(labelKeys), 1, labelValues...) -} diff --git a/collectors/daemonset_test.go b/collectors/daemonset_test.go deleted file mode 100644 index ee1f986b4b..0000000000 --- a/collectors/daemonset_test.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockDaemonSetStore struct { - f func() ([]v1beta1.DaemonSet, error) -} - -func (ds mockDaemonSetStore) List() (daemonsets []v1beta1.DaemonSet, err error) { - return ds.f() -} - -func TestDaemonSetCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_daemonset_created Unix creation timestamp - # TYPE kube_daemonset_created gauge - # HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state. - # TYPE kube_daemonset_metadata_generation gauge - # HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to. - # TYPE kube_daemonset_status_current_number_scheduled gauge - # HELP kube_daemonset_status_number_misscheduled The number of nodes running a daemon pod but are not supposed to. - # TYPE kube_daemonset_status_number_misscheduled gauge - # HELP kube_daemonset_status_desired_number_scheduled The number of nodes that should be running the daemon pod. - # TYPE kube_daemonset_status_desired_number_scheduled gauge - # HELP kube_daemonset_status_number_available The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available - # TYPE kube_daemonset_status_number_available gauge - # HELP kube_daemonset_status_number_ready The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready. - # TYPE kube_daemonset_status_number_ready gauge - # HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available - # TYPE kube_daemonset_status_number_unavailable gauge - # HELP kube_daemonset_updated_number_scheduled The total number of nodes that are running updated daemon pod - # TYPE kube_daemonset_updated_number_scheduled gauge - # HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels. 
- # TYPE kube_daemonset_labels gauge -` - cases := []struct { - dss []v1beta1.DaemonSet - want string - }{ - { - dss: []v1beta1.DaemonSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "ds1", - Namespace: "ns1", - Labels: map[string]string{ - "app": "example1", - }, - Generation: 21, - }, - Status: v1beta1.DaemonSetStatus{ - CurrentNumberScheduled: 15, - NumberMisscheduled: 10, - DesiredNumberScheduled: 5, - NumberReady: 5, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "ds2", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns2", - Labels: map[string]string{ - "app": "example2", - }, - Generation: 14, - }, - Status: v1beta1.DaemonSetStatus{ - CurrentNumberScheduled: 10, - NumberMisscheduled: 5, - DesiredNumberScheduled: 0, - NumberReady: 0, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "ds3", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns3", - Labels: map[string]string{ - "app": "example3", - }, - Generation: 15, - }, - Status: v1beta1.DaemonSetStatus{ - CurrentNumberScheduled: 10, - NumberMisscheduled: 5, - DesiredNumberScheduled: 15, - NumberReady: 5, - NumberAvailable: 5, - NumberUnavailable: 5, - UpdatedNumberScheduled: 5, - }, - }, - }, - want: metadata + ` - kube_daemonset_created{daemonset="ds2",namespace="ns2"} 1.5e+09 - kube_daemonset_created{daemonset="ds3",namespace="ns3"} 1.5e+09 - kube_daemonset_metadata_generation{namespace="ns1",daemonset="ds1"} 21 - kube_daemonset_metadata_generation{namespace="ns2",daemonset="ds2"} 14 - kube_daemonset_metadata_generation{namespace="ns3",daemonset="ds3"} 15 - kube_daemonset_status_current_number_scheduled{namespace="ns1",daemonset="ds1"} 15 - kube_daemonset_status_current_number_scheduled{namespace="ns2",daemonset="ds2"} 10 - kube_daemonset_status_current_number_scheduled{namespace="ns3",daemonset="ds3"} 10 - kube_daemonset_status_desired_number_scheduled{namespace="ns1",daemonset="ds1"} 5 - kube_daemonset_status_desired_number_scheduled{namespace="ns2",daemonset="ds2"} 0 - kube_daemonset_status_desired_number_scheduled{namespace="ns3",daemonset="ds3"} 15 - kube_daemonset_status_number_available{daemonset="ds1",namespace="ns1"} 0 - kube_daemonset_status_number_available{daemonset="ds2",namespace="ns2"} 0 - kube_daemonset_status_number_available{daemonset="ds3",namespace="ns3"} 5 - kube_daemonset_status_number_misscheduled{namespace="ns1",daemonset="ds1"} 10 - kube_daemonset_status_number_misscheduled{namespace="ns2",daemonset="ds2"} 5 - kube_daemonset_status_number_misscheduled{namespace="ns3",daemonset="ds3"} 5 - kube_daemonset_status_number_ready{namespace="ns1",daemonset="ds1"} 5 - kube_daemonset_status_number_ready{namespace="ns2",daemonset="ds2"} 0 - kube_daemonset_status_number_ready{namespace="ns3",daemonset="ds3"} 5 - kube_daemonset_status_number_unavailable{daemonset="ds1",namespace="ns1"} 0 - kube_daemonset_status_number_unavailable{daemonset="ds2",namespace="ns2"} 0 - kube_daemonset_status_number_unavailable{daemonset="ds3",namespace="ns3"} 5 - kube_daemonset_updated_number_scheduled{daemonset="ds1",namespace="ns1"} 0 - kube_daemonset_updated_number_scheduled{daemonset="ds2",namespace="ns2"} 0 - kube_daemonset_updated_number_scheduled{daemonset="ds3",namespace="ns3"} 5 - kube_daemonset_labels{label_app="example1",namespace="ns1",daemonset="ds1"} 1 - kube_daemonset_labels{label_app="example2",namespace="ns2",daemonset="ds2"} 1 - kube_daemonset_labels{label_app="example3",namespace="ns3",daemonset="ds3"} 1 - `, - }, - } - for _, c := range cases { - dc 
:= &daemonsetCollector{ - store: mockDaemonSetStore{ - f: func() ([]v1beta1.DaemonSet, error) { return c.dss, nil }, - }, - } - if err := gatherAndCompare(dc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/deployment.go b/collectors/deployment.go deleted file mode 100644 index beaa9923b7..0000000000 --- a/collectors/deployment.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -var ( - descDeploymentLabelsName = "kube_deployment_labels" - descDeploymentLabelsHelp = "Kubernetes labels converted to Prometheus labels." - descDeploymentLabelsDefaultLabels = []string{"namespace", "deployment"} - - descDeploymentCreated = prometheus.NewDesc( - "kube_deployment_created", - "Unix creation timestamp", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentStatusReplicas = prometheus.NewDesc( - "kube_deployment_status_replicas", - "The number of replicas per deployment.", - []string{"namespace", "deployment"}, nil, - ) - descDeploymentStatusReplicasAvailable = prometheus.NewDesc( - "kube_deployment_status_replicas_available", - "The number of available replicas per deployment.", - []string{"namespace", "deployment"}, nil, - ) - descDeploymentStatusReplicasUnavailable = prometheus.NewDesc( - "kube_deployment_status_replicas_unavailable", - "The number of unavailable replicas per deployment.", - []string{"namespace", "deployment"}, nil, - ) - descDeploymentStatusReplicasUpdated = prometheus.NewDesc( - "kube_deployment_status_replicas_updated", - "The number of updated replicas per deployment.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentStatusObservedGeneration = prometheus.NewDesc( - "kube_deployment_status_observed_generation", - "The generation observed by the deployment controller.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentSpecReplicas = prometheus.NewDesc( - "kube_deployment_spec_replicas", - "Number of desired pods for a deployment.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentSpecPaused = prometheus.NewDesc( - "kube_deployment_spec_paused", - "Whether the deployment is paused and will not be processed by the deployment controller.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentStrategyRollingUpdateMaxUnavailable = prometheus.NewDesc( - "kube_deployment_spec_strategy_rollingupdate_max_unavailable", - "Maximum number of unavailable replicas during a rolling update of a deployment.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentStrategyRollingUpdateMaxSurge = prometheus.NewDesc( - "kube_deployment_spec_strategy_rollingupdate_max_surge", - "Maximum number of replicas that can be 
scheduled above the desired number of replicas during a rolling update of a deployment.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentMetadataGeneration = prometheus.NewDesc( - "kube_deployment_metadata_generation", - "Sequence number representing a specific generation of the desired state.", - []string{"namespace", "deployment"}, nil, - ) - - descDeploymentLabels = prometheus.NewDesc( - descDeploymentLabelsName, - descDeploymentLabelsHelp, - descDeploymentLabelsDefaultLabels, nil, - ) -) - -type DeploymentLister func() ([]v1beta1.Deployment, error) - -func (l DeploymentLister) List() ([]v1beta1.Deployment, error) { - return l() -} - -func RegisterDeploymentCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.ExtensionsV1beta1().RESTClient() - glog.Infof("collect deployment with %s", client.APIVersion()) - - dinfs := NewSharedInformerList(client, "deployments", namespaces, &v1beta1.Deployment{}) - - dplLister := DeploymentLister(func() (deployments []v1beta1.Deployment, err error) { - for _, dinf := range *dinfs { - for _, c := range dinf.GetStore().List() { - deployments = append(deployments, *(c.(*v1beta1.Deployment))) - } - } - return deployments, nil - }) - - registry.MustRegister(&deploymentCollector{store: dplLister}) - dinfs.Run(context.Background().Done()) -} - -type deploymentStore interface { - List() (deployments []v1beta1.Deployment, err error) -} - -// deploymentCollector collects metrics about all deployments in the cluster. -type deploymentCollector struct { - store deploymentStore -} - -// Describe implements the prometheus.Collector interface. -func (dc *deploymentCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descDeploymentCreated - ch <- descDeploymentStatusReplicas - ch <- descDeploymentStatusReplicasAvailable - ch <- descDeploymentStatusReplicasUnavailable - ch <- descDeploymentStatusReplicasUpdated - ch <- descDeploymentStatusObservedGeneration - ch <- descDeploymentSpecPaused - ch <- descDeploymentStrategyRollingUpdateMaxUnavailable - ch <- descDeploymentStrategyRollingUpdateMaxSurge - ch <- descDeploymentSpecReplicas - ch <- descDeploymentMetadataGeneration - ch <- descDeploymentLabels -} - -// Collect implements the prometheus.Collector interface. -func (dc *deploymentCollector) Collect(ch chan<- prometheus.Metric) { - ds, err := dc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "deployment"}).Inc() - glog.Errorf("listing deployments failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "deployment"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "deployment"}).Observe(float64(len(ds))) - for _, d := range ds { - dc.collectDeployment(ch, d) - } - - glog.V(4).Infof("collected %d deployments", len(ds)) -} - -func deploymentLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descDeploymentLabelsName, - descDeploymentLabelsHelp, - append(descDeploymentLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (dc *deploymentCollector) collectDeployment(ch chan<- prometheus.Metric, d v1beta1.Deployment) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{d.Namespace, d.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(d.Labels) - addGauge(deploymentLabelsDesc(labelKeys), 1, labelValues...) 
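The rolling-update fields handled at the end of this function are IntOrString values in the API, so the collector resolves them with intstr.GetValueFromIntOrPercent: an absolute count passes through unchanged, while a percentage is taken of the desired replica count, rounding up when the final argument is true. A small sketch of that arithmetic, using the same values as the test fixtures further down:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        replicas := 5
        abs := intstr.FromInt(10)
        pct := intstr.FromString("20%")

        // An absolute IntOrString resolves to itself: 10.
        maxUnavailable, _ := intstr.GetValueFromIntOrPercent(&abs, replicas, true)
        // "20%" of 5 replicas is 1.0, rounded up to 1.
        maxSurge, _ := intstr.GetValueFromIntOrPercent(&pct, replicas, true)

        fmt.Println(maxUnavailable, maxSurge) // 10 1
    }

This is why the deployment test below expects the "20%" fixtures on a 5-replica deployment to yield 1.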
- if !d.CreationTimestamp.IsZero() { - addGauge(descDeploymentCreated, float64(d.CreationTimestamp.Unix())) - } - addGauge(descDeploymentStatusReplicas, float64(d.Status.Replicas)) - addGauge(descDeploymentStatusReplicasAvailable, float64(d.Status.AvailableReplicas)) - addGauge(descDeploymentStatusReplicasUnavailable, float64(d.Status.UnavailableReplicas)) - addGauge(descDeploymentStatusReplicasUpdated, float64(d.Status.UpdatedReplicas)) - addGauge(descDeploymentStatusObservedGeneration, float64(d.Status.ObservedGeneration)) - addGauge(descDeploymentSpecPaused, boolFloat64(d.Spec.Paused)) - addGauge(descDeploymentSpecReplicas, float64(*d.Spec.Replicas)) - addGauge(descDeploymentMetadataGeneration, float64(d.ObjectMeta.Generation)) - - if d.Spec.Strategy.RollingUpdate == nil { - return - } - - maxUnavailable, err := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*d.Spec.Replicas), true) - if err != nil { - glog.Errorf("Error converting RollingUpdate MaxUnavailable to int: %s", err) - } else { - addGauge(descDeploymentStrategyRollingUpdateMaxUnavailable, float64(maxUnavailable)) - } - - maxSurge, err := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxSurge, int(*d.Spec.Replicas), true) - if err != nil { - glog.Errorf("Error converting RollingUpdate MaxSurge to int: %s", err) - } else { - addGauge(descDeploymentStrategyRollingUpdateMaxSurge, float64(maxSurge)) - } - -} diff --git a/collectors/deployment_test.go b/collectors/deployment_test.go deleted file mode 100644 index 7ec13c2d6f..0000000000 --- a/collectors/deployment_test.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strings" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - - "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -var ( - depl1Replicas int32 = 200 - depl2Replicas int32 = 5 - - depl1MaxUnavailable = intstr.FromInt(10) - depl2MaxUnavailable = intstr.FromString("20%") - - depl1MaxSurge = intstr.FromInt(10) - depl2MaxSurge = intstr.FromString("20%") -) - -type mockDeploymentStore struct { - f func() ([]v1beta1.Deployment, error) -} - -func (ds mockDeploymentStore) List() (deployments []v1beta1.Deployment, err error) { - return ds.f() -} - -func TestDeploymentCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_deployment_created Unix creation timestamp - # TYPE kube_deployment_created gauge - # HELP kube_deployment_metadata_generation Sequence number representing a specific generation of the desired state. 
- # TYPE kube_deployment_metadata_generation gauge - # HELP kube_deployment_spec_paused Whether the deployment is paused and will not be processed by the deployment controller. - # TYPE kube_deployment_spec_paused gauge - # HELP kube_deployment_spec_replicas Number of desired pods for a deployment. - # TYPE kube_deployment_spec_replicas gauge - # HELP kube_deployment_status_replicas The number of replicas per deployment. - # TYPE kube_deployment_status_replicas gauge - # HELP kube_deployment_status_replicas_available The number of available replicas per deployment. - # TYPE kube_deployment_status_replicas_available gauge - # HELP kube_deployment_status_replicas_unavailable The number of unavailable replicas per deployment. - # TYPE kube_deployment_status_replicas_unavailable gauge - # HELP kube_deployment_status_replicas_updated The number of updated replicas per deployment. - # TYPE kube_deployment_status_replicas_updated gauge - # HELP kube_deployment_status_observed_generation The generation observed by the deployment controller. - # TYPE kube_deployment_status_observed_generation gauge - # HELP kube_deployment_spec_strategy_rollingupdate_max_unavailable Maximum number of unavailable replicas during a rolling update of a deployment. - # TYPE kube_deployment_spec_strategy_rollingupdate_max_unavailable gauge - # HELP kube_deployment_spec_strategy_rollingupdate_max_surge Maximum number of replicas that can be scheduled above the desired number of replicas during a rolling update of a deployment. - # TYPE kube_deployment_spec_strategy_rollingupdate_max_surge gauge - # HELP kube_deployment_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_deployment_labels gauge - ` - cases := []struct { - depls []v1beta1.Deployment - want string - }{ - { - depls: []v1beta1.Deployment{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "depl1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - Labels: map[string]string{ - "app": "example1", - }, - Generation: 21, - }, - Status: v1beta1.DeploymentStatus{ - Replicas: 15, - AvailableReplicas: 10, - UnavailableReplicas: 5, - UpdatedReplicas: 2, - ObservedGeneration: 111, - }, - Spec: v1beta1.DeploymentSpec{ - Replicas: &depl1Replicas, - Strategy: v1beta1.DeploymentStrategy{ - RollingUpdate: &v1beta1.RollingUpdateDeployment{ - MaxUnavailable: &depl1MaxUnavailable, - MaxSurge: &depl1MaxSurge, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "depl2", - Namespace: "ns2", - Labels: map[string]string{ - "app": "example2", - }, - Generation: 14, - }, - Status: v1beta1.DeploymentStatus{ - Replicas: 10, - AvailableReplicas: 5, - UnavailableReplicas: 0, - UpdatedReplicas: 1, - ObservedGeneration: 1111, - }, - Spec: v1beta1.DeploymentSpec{ - Paused: true, - Replicas: &depl2Replicas, - Strategy: v1beta1.DeploymentStrategy{ - RollingUpdate: &v1beta1.RollingUpdateDeployment{ - MaxUnavailable: &depl2MaxUnavailable, - MaxSurge: &depl2MaxSurge, - }, - }, - }, - }, - }, - want: metadata + ` - kube_deployment_created{deployment="depl1",namespace="ns1"} 1.5e+09 - kube_deployment_metadata_generation{namespace="ns1",deployment="depl1"} 21 - kube_deployment_metadata_generation{namespace="ns2",deployment="depl2"} 14 - kube_deployment_spec_paused{namespace="ns1",deployment="depl1"} 0 - kube_deployment_spec_paused{namespace="ns2",deployment="depl2"} 1 - kube_deployment_spec_replicas{namespace="ns1",deployment="depl1"} 200 - kube_deployment_spec_replicas{namespace="ns2",deployment="depl2"} 5 - 
kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="depl1",namespace="ns1"} 10 - kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="depl2",namespace="ns2"} 1 - kube_deployment_spec_strategy_rollingupdate_max_surge{deployment="depl1",namespace="ns1"} 10 - kube_deployment_spec_strategy_rollingupdate_max_surge{deployment="depl2",namespace="ns2"} 1 - kube_deployment_status_observed_generation{namespace="ns1",deployment="depl1"} 111 - kube_deployment_status_observed_generation{namespace="ns2",deployment="depl2"} 1111 - kube_deployment_status_replicas{namespace="ns1",deployment="depl1"} 15 - kube_deployment_status_replicas{namespace="ns2",deployment="depl2"} 10 - kube_deployment_status_replicas_available{namespace="ns1",deployment="depl1"} 10 - kube_deployment_status_replicas_available{namespace="ns2",deployment="depl2"} 5 - kube_deployment_status_replicas_unavailable{namespace="ns1",deployment="depl1"} 5 - kube_deployment_status_replicas_unavailable{namespace="ns2",deployment="depl2"} 0 - kube_deployment_status_replicas_updated{namespace="ns1",deployment="depl1"} 2 - kube_deployment_status_replicas_updated{namespace="ns2",deployment="depl2"} 1 - kube_deployment_labels{label_app="example1",namespace="ns1",deployment="depl1"} 1 - kube_deployment_labels{label_app="example2",namespace="ns2",deployment="depl2"} 1 - `, - }, - } - for _, c := range cases { - dc := &deploymentCollector{ - store: mockDeploymentStore{ - f: func() ([]v1beta1.Deployment, error) { return c.depls, nil }, - }, - } - if err := gatherAndCompare(dc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} - -// gatherAndCompare retrieves all metrics exposed by a collector and compares it -// to an expected output in the Prometheus text exposition format. -// metricNames allows only comparing the given metrics. All are compared if it's nil. -func gatherAndCompare(c prometheus.Collector, expected string, metricNames []string) error { - expected = removeUnusedWhitespace(expected) - - reg := prometheus.NewPedanticRegistry() - if err := reg.Register(c); err != nil { - return fmt.Errorf("registering collector failed: %s", err) - } - metrics, err := reg.Gather() - if err != nil { - return fmt.Errorf("gathering metrics failed: %s", err) - } - if metricNames != nil { - metrics = filterMetrics(metrics, metricNames) - } - var tp expfmt.TextParser - expectedMetrics, err := tp.TextToMetricFamilies(bytes.NewReader([]byte(expected))) - if err != nil { - return fmt.Errorf("parsing expected metrics failed: %s", err) - } - - if !reflect.DeepEqual(metrics, normalizeMetricFamilies(expectedMetrics)) { - // Encode the gathered output to the readable text format for comparison. - var buf1 bytes.Buffer - enc := expfmt.NewEncoder(&buf1, expfmt.FmtText) - for _, mf := range metrics { - if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding result failed: %s", err) - } - } - // Encode normalized expected metrics again to generate them in the same ordering - // the registry does to spot differences more easily.
- var buf2 bytes.Buffer - enc = expfmt.NewEncoder(&buf2, expfmt.FmtText) - for _, mf := range normalizeMetricFamilies(expectedMetrics) { - if err := enc.Encode(mf); err != nil { - return fmt.Errorf("encoding result failed: %s", err) - } - } - - return fmt.Errorf(` -metric output does not match expectation; want: - -%s - -got: - -%s -`, buf2.String(), buf1.String()) - } - return nil -} - -func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { - var filtered []*dto.MetricFamily - for _, m := range metrics { - drop := true - for _, name := range names { - if m.GetName() == name { - drop = false - break - } - } - if !drop { - filtered = append(filtered, m) - } - } - return filtered -} - -func removeUnusedWhitespace(s string) string { - var ( - trimmedLine string - trimmedLines []string - lines = strings.Split(s, "\n") - ) - - for _, l := range lines { - trimmedLine = strings.TrimSpace(l) - - if len(trimmedLine) > 0 { - trimmedLines = append(trimmedLines, trimmedLine) - } - } - - // The Prometheus metrics representation parser expects an empty line at the - // end, otherwise it fails with an unexpected EOF error. - return strings.Join(trimmedLines, "\n") + "\n" -} - -// The below sorting code is copied from the Prometheus client library modulo the added -// label pair sorting. -// https://github.com/prometheus/client_golang/blob/ea6e1db4cb8127eeb0b6954f7320363e5451820f/prometheus/registry.go#L642-L684 - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - sort.Sort(prometheus.LabelPairSorter(s[i].Label)) - sort.Sort(prometheus.LabelPairSorter(s[j].Label)) - - if len(s[i].Label) != len(s[j].Label) { - return len(s[i].Label) < len(s[j].Label) - } - - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// normalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/collectors/endpoint.go b/collectors/endpoint.go deleted file mode 100644 index c977d747d0..0000000000 --- a/collectors/endpoint.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "golang.org/x/net/context" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descEndpointLabelsName = "kube_endpoint_labels" - descEndpointLabelsHelp = "Kubernetes labels converted to Prometheus labels." - descEndpointLabelsDefaultLabels = []string{"namespace", "endpoint"} - - descEndpointInfo = prometheus.NewDesc( - "kube_endpoint_info", - "Information about endpoint.", - []string{"namespace", "endpoint"}, nil, - ) - - descEndpointCreated = prometheus.NewDesc( - "kube_endpoint_created", - "Unix creation timestamp", - []string{"namespace", "endpoint"}, nil, - ) - - descEndpointLabels = prometheus.NewDesc( - descEndpointLabelsName, - descEndpointLabelsHelp, - descEndpointLabelsDefaultLabels, nil, - ) - - descEndpointAddressAvailable = prometheus.NewDesc( - "kube_endpoint_address_available", - "Number of addresses available in endpoint.", - []string{"namespace", "endpoint"}, nil) - - descEndpointAddressNotReady = prometheus.NewDesc( - "kube_endpoint_address_not_ready", - "Number of addresses not ready in endpoint", - []string{"namespace", "endpoint"}, nil) -) - -type EndpointLister func() ([]v1.Endpoints, error) - -func (l EndpointLister) List() ([]v1.Endpoints, error) { - return l() -} - -func RegisterEndpointCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect endpoint with %s", client.APIVersion()) - - sinfs := NewSharedInformerList(client, "endpoints", namespaces, &v1.Endpoints{}) - - endpointLister := EndpointLister(func() (endpoints []v1.Endpoints, err error) { - for _, sinf := range *sinfs { - for _, m := range sinf.GetStore().List() { - endpoints = append(endpoints, *m.(*v1.Endpoints)) - } - } - return endpoints, nil - }) - - registry.MustRegister(&endpointCollector{store: endpointLister}) - sinfs.Run(context.Background().Done()) -} - -type endpointStore interface { - List() (endpoints []v1.Endpoints, err error) -} - -// endpointCollector collects metrics about all endpoints in the cluster. -type endpointCollector struct { - store endpointStore -} - -// Describe implements the prometheus.Collector interface. -func (pc *endpointCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descEndpointInfo - ch <- descEndpointLabels - ch <- descEndpointCreated - ch <- descEndpointAddressAvailable - ch <- descEndpointAddressNotReady -} - -// Collect implements the prometheus.Collector interface. 
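The Collect implementation that follows counts endpoint addresses per subset as addresses × ports, since every ready address is reachable on each of its subset's ports. A worked sketch using the same four subsets as the test fixture further down (ready: 2×2 + 1×2 = 6; not ready: 1×2 + 2×2 = 6):

    package main

    import (
        "fmt"

        "k8s.io/api/core/v1"
    )

    func main() {
        subsets := []v1.EndpointSubset{
            {Addresses: make([]v1.EndpointAddress, 2), Ports: make([]v1.EndpointPort, 2)},
            {Addresses: make([]v1.EndpointAddress, 1), Ports: make([]v1.EndpointPort, 2)},
            {NotReadyAddresses: make([]v1.EndpointAddress, 1), Ports: make([]v1.EndpointPort, 2)},
            {NotReadyAddresses: make([]v1.EndpointAddress, 2), Ports: make([]v1.EndpointPort, 2)},
        }

        var available, notReady int
        for _, s := range subsets {
            // Each address counts once per port of its subset.
            available += len(s.Addresses) * len(s.Ports)
            notReady += len(s.NotReadyAddresses) * len(s.Ports)
        }
        fmt.Println(available, notReady) // 6 6
    }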
-func (ec *endpointCollector) Collect(ch chan<- prometheus.Metric) { - endpoints, err := ec.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "endpoint"}).Inc() - glog.Errorf("listing endpoints failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "endpoint"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "endpoint"}).Observe(float64(len(endpoints))) - for _, e := range endpoints { - ec.collectEndpoints(ch, e) - } - - glog.V(4).Infof("collected %d endpoints", len(endpoints)) -} - -func (ec *endpointCollector) collectEndpoints(ch chan<- prometheus.Metric, e v1.Endpoints) { - addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { - lv = append([]string{e.Namespace, e.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, t, v, lv...) - } - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - - addGauge(descEndpointInfo, 1) - if !e.CreationTimestamp.IsZero() { - addGauge(descEndpointCreated, float64(e.CreationTimestamp.Unix())) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(e.Labels) - addGauge(endpointLabelsDesc(labelKeys), 1, labelValues...) - - var available int - for _, s := range e.Subsets { - available += len(s.Addresses) * len(s.Ports) - } - addGauge(descEndpointAddressAvailable, float64(available)) - - var notReady int - for _, s := range e.Subsets { - notReady += len(s.NotReadyAddresses) * len(s.Ports) - } - addGauge(descEndpointAddressNotReady, float64(notReady)) -} - -func endpointLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descEndpointLabelsName, - descEndpointLabelsHelp, - append(descEndpointLabelsDefaultLabels, labelKeys...), - nil, - ) -} diff --git a/collectors/endpoint_test.go b/collectors/endpoint_test.go deleted file mode 100644 index f501a61469..0000000000 --- a/collectors/endpoint_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockEndpointStore struct { - list func() ([]v1.Endpoints, error) -} - -func (es mockEndpointStore) List() ([]v1.Endpoints, error) { - return es.list() -} - -func TestEndpointCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_endpoint_address_available Number of addresses available in endpoint. 
- # TYPE kube_endpoint_address_available gauge - # HELP kube_endpoint_address_not_ready Number of addresses not ready in endpoint - # TYPE kube_endpoint_address_not_ready gauge - # HELP kube_endpoint_created Unix creation timestamp - # TYPE kube_endpoint_created gauge - # HELP kube_endpoint_info Information about endpoint. - # TYPE kube_endpoint_info gauge - # HELP kube_endpoint_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_endpoint_labels gauge - ` - cases := []struct { - endpoints []v1.Endpoints - metrics []string // which metrics should be checked - want string - }{ - { - endpoints: []v1.Endpoints{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-endpoint", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "default", - Labels: map[string]string{ - "app": "foobar", - }, - }, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{ - {IP: "127.0.0.1"}, {IP: "10.0.0.1"}, - }, - Ports: []v1.EndpointPort{ - {Port: 8080}, {Port: 8081}, - }, - }, - {Addresses: []v1.EndpointAddress{ - {IP: "172.22.23.202"}, - }, - Ports: []v1.EndpointPort{ - {Port: 8443}, {Port: 9090}, - }, - }, - {NotReadyAddresses: []v1.EndpointAddress{ - {IP: "192.168.1.1"}, - }, - Ports: []v1.EndpointPort{ - {Port: 1234}, {Port: 5678}, - }, - }, - {NotReadyAddresses: []v1.EndpointAddress{ - {IP: "192.168.1.3"}, {IP: "192.168.2.2"}, - }, - Ports: []v1.EndpointPort{ - {Port: 1234}, {Port: 5678}, - }, - }, - }, - }, - }, - want: metadata + ` - kube_endpoint_address_available{endpoint="test-endpoint",namespace="default"} 6 - kube_endpoint_address_not_ready{endpoint="test-endpoint",namespace="default"} 6 - kube_endpoint_created{endpoint="test-endpoint",namespace="default"} 1.5e+09 - kube_endpoint_info{endpoint="test-endpoint",namespace="default"} 1 - kube_endpoint_labels{endpoint="test-endpoint",label_app="foobar",namespace="default"} 1 - `, - }, - } - for _, c := range cases { - sc := &endpointCollector{ - store: &mockEndpointStore{ - list: func() ([]v1.Endpoints, error) { - return c.endpoints, nil - }, - }, - } - if err := gatherAndCompare(sc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/hpa.go b/collectors/hpa.go deleted file mode 100644 index 088e217619..0000000000 --- a/collectors/hpa.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "context" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - autoscaling "k8s.io/api/autoscaling/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descHorizontalPodAutoscalerLabelsName = "kube_hpa_labels" - descHorizontalPodAutoscalerLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descHorizontalPodAutoscalerLabelsDefaultLabels = []string{"namespace", "hpa"} - - descHorizontalPodAutoscalerMetadataGeneration = prometheus.NewDesc( - "kube_hpa_metadata_generation", - "The generation observed by the HorizontalPodAutoscaler controller.", - []string{"namespace", "hpa"}, nil, - ) - descHorizontalPodAutoscalerSpecMaxReplicas = prometheus.NewDesc( - "kube_hpa_spec_max_replicas", - "Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - []string{"namespace", "hpa"}, nil, - ) - descHorizontalPodAutoscalerSpecMinReplicas = prometheus.NewDesc( - "kube_hpa_spec_min_replicas", - "Lower limit for the number of pods that can be set by the autoscaler, default 1.", - []string{"namespace", "hpa"}, nil, - ) - descHorizontalPodAutoscalerStatusCurrentReplicas = prometheus.NewDesc( - "kube_hpa_status_current_replicas", - "Current number of replicas of pods managed by this autoscaler.", - []string{"namespace", "hpa"}, nil, - ) - descHorizontalPodAutoscalerStatusDesiredReplicas = prometheus.NewDesc( - "kube_hpa_status_desired_replicas", - "Desired number of replicas of pods managed by this autoscaler.", - []string{"namespace", "hpa"}, nil, - ) - descHorizontalPodAutoscalerLabels = prometheus.NewDesc( - descHorizontalPodAutoscalerLabelsName, - descHorizontalPodAutoscalerLabelsHelp, - descHorizontalPodAutoscalerLabelsDefaultLabels, nil, - ) -) - -type HPALister func() (autoscaling.HorizontalPodAutoscalerList, error) - -func (l HPALister) List() (autoscaling.HorizontalPodAutoscalerList, error) { - return l() -} - -func RegisterHorizontalPodAutoScalerCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.Autoscaling().RESTClient() - glog.Infof("collect hpa with %s", client.APIVersion()) - hpainfs := NewSharedInformerList(client, "horizontalpodautoscalers", namespaces, &autoscaling.HorizontalPodAutoscaler{}) - - hpaLister := HPALister(func() (hpas autoscaling.HorizontalPodAutoscalerList, err error) { - for _, hpainf := range *hpainfs { - for _, h := range hpainf.GetStore().List() { - hpas.Items = append(hpas.Items, *(h.(*autoscaling.HorizontalPodAutoscaler))) - } - } - return hpas, nil - }) - - registry.MustRegister(&hpaCollector{store: hpaLister}) - hpainfs.Run(context.Background().Done()) -} - -type hpaStore interface { - List() (hpas autoscaling.HorizontalPodAutoscalerList, err error) -} - -// hpaCollector collects metrics about all Horizontal Pod Autoscalers in the cluster. -type hpaCollector struct { - store hpaStore -} - -// Describe implements the prometheus.Collector interface. -func (hc *hpaCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descHorizontalPodAutoscalerMetadataGeneration - ch <- descHorizontalPodAutoscalerSpecMaxReplicas - ch <- descHorizontalPodAutoscalerSpecMinReplicas - ch <- descHorizontalPodAutoscalerStatusCurrentReplicas - ch <- descHorizontalPodAutoscalerStatusDesiredReplicas - ch <- descHorizontalPodAutoscalerLabels -} - -// Collect implements the prometheus.Collector interface.
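Each collector in this diff builds its kube_*_labels series via kubeLabelsToPrometheusLabels, whose definition lives elsewhere in this package. Judging by the expected test output (a Kubernetes label app becomes a Prometheus label label_app), it plausibly prefixes each key with "label_" and sanitizes characters that are invalid in Prometheus label names; a sketch under that assumption, not the package's actual code:

    package main

    import (
        "fmt"
        "regexp"
    )

    var invalidLabelChar = regexp.MustCompile(`[^a-zA-Z0-9_]`)

    // Assumed behaviour: prefix keys with "label_" and replace invalid
    // characters with underscores; values pass through unchanged.
    func kubeLabelsToPrometheusLabels(labels map[string]string) ([]string, []string) {
        keys := make([]string, 0, len(labels))
        values := make([]string, 0, len(labels))
        for k, v := range labels {
            keys = append(keys, "label_"+invalidLabelChar.ReplaceAllString(k, "_"))
            values = append(values, v)
        }
        return keys, values
    }

    func main() {
        keys, values := kubeLabelsToPrometheusLabels(map[string]string{"app": "example1"})
        fmt.Println(keys, values) // [label_app] [example1]
    }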
-func (hc *hpaCollector) Collect(ch chan<- prometheus.Metric) { - hpas, err := hc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "horizontalpodautoscaler"}).Inc() - glog.Errorf("listing HorizontalPodAutoscalers failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "horizontalpodautoscaler"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "horizontalpodautoscaler"}).Observe(float64(len(hpas.Items))) - for _, h := range hpas.Items { - hc.collectHPA(ch, h) - } - - glog.V(4).Infof("collected %d hpas", len(hpas.Items)) -} - -func hpaLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descHorizontalPodAutoscalerLabelsName, - descHorizontalPodAutoscalerLabelsHelp, - append(descHorizontalPodAutoscalerLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (hc *hpaCollector) collectHPA(ch chan<- prometheus.Metric, h autoscaling.HorizontalPodAutoscaler) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{h.Namespace, h.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(h.Labels) - addGauge(hpaLabelsDesc(labelKeys), 1, labelValues...) - addGauge(descHorizontalPodAutoscalerMetadataGeneration, float64(h.ObjectMeta.Generation)) - addGauge(descHorizontalPodAutoscalerSpecMaxReplicas, float64(h.Spec.MaxReplicas)) - addGauge(descHorizontalPodAutoscalerSpecMinReplicas, float64(*h.Spec.MinReplicas)) - addGauge(descHorizontalPodAutoscalerStatusCurrentReplicas, float64(h.Status.CurrentReplicas)) - addGauge(descHorizontalPodAutoscalerStatusDesiredReplicas, float64(h.Status.DesiredReplicas)) -} diff --git a/collectors/hpa_test.go b/collectors/hpa_test.go deleted file mode 100644 index 5a8fec8cbe..0000000000 --- a/collectors/hpa_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - - autoscaling "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - hpa1MinReplicas int32 = 2 -) - -type mockHPAStore struct { - list func() (autoscaling.HorizontalPodAutoscalerList, error) -} - -func (hs mockHPAStore) List() (autoscaling.HorizontalPodAutoscalerList, error) { - return hs.list() -} - -func TestHPACollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_hpa_metadata_generation The generation observed by the HorizontalPodAutoscaler controller. - # TYPE kube_hpa_metadata_generation gauge - # HELP kube_hpa_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. 
- # TYPE kube_hpa_spec_max_replicas gauge - # HELP kube_hpa_spec_min_replicas Lower limit for the number of pods that can be set by the autoscaler, default 1. - # TYPE kube_hpa_spec_min_replicas gauge - # HELP kube_hpa_status_current_replicas Current number of replicas of pods managed by this autoscaler. - # TYPE kube_hpa_status_current_replicas gauge - # HELP kube_hpa_status_desired_replicas Desired number of replicas of pods managed by this autoscaler. - # TYPE kube_hpa_status_desired_replicas gauge - ` - cases := []struct { - hpas []autoscaling.HorizontalPodAutoscaler - metrics []string // which metrics should be checked - want string - }{ - // Verify populating base metrics. - { - hpas: []autoscaling.HorizontalPodAutoscaler{ - { - ObjectMeta: metav1.ObjectMeta{ - Generation: 2, - Name: "hpa1", - Namespace: "ns1", - }, - Spec: autoscaling.HorizontalPodAutoscalerSpec{ - MaxReplicas: 4, - MinReplicas: &hpa1MinReplicas, - ScaleTargetRef: autoscaling.CrossVersionObjectReference{ - APIVersion: "extensions/v1beta1", - Kind: "Deployment", - Name: "deployment1", - }, - }, - Status: autoscaling.HorizontalPodAutoscalerStatus{ - CurrentReplicas: 2, - DesiredReplicas: 2, - }, - }, - }, - want: metadata + ` - kube_hpa_metadata_generation{hpa="hpa1",namespace="ns1"} 2 - kube_hpa_spec_max_replicas{hpa="hpa1",namespace="ns1"} 4 - kube_hpa_spec_min_replicas{hpa="hpa1",namespace="ns1"} 2 - kube_hpa_status_current_replicas{hpa="hpa1",namespace="ns1"} 2 - kube_hpa_status_desired_replicas{hpa="hpa1",namespace="ns1"} 2 - `, - metrics: []string{ - "kube_hpa_metadata_generation", - "kube_hpa_spec_max_replicas", - "kube_hpa_spec_min_replicas", - "kube_hpa_status_current_replicas", - "kube_hpa_status_desired_replicas", - }, - }, - } - for _, c := range cases { - hc := &hpaCollector{ - store: &mockHPAStore{ - list: func() (autoscaling.HorizontalPodAutoscalerList, error) { - return autoscaling.HorizontalPodAutoscalerList{Items: c.hpas}, nil - }, - }, - } - if err := gatherAndCompare(hc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/job.go b/collectors/job.go deleted file mode 100644 index 69742e4901..0000000000 --- a/collectors/job.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - v1batch "k8s.io/api/batch/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descJobLabelsName = "kube_job_labels" - descJobLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descJobLabelsDefaultLabels = []string{"namespace", "job"} - - descJobLabels = prometheus.NewDesc( - descJobLabelsName, - descJobLabelsHelp, - descJobLabelsDefaultLabels, nil, - ) - - descJobInfo = prometheus.NewDesc( - "kube_job_info", - "Information about job.", - []string{"namespace", "job"}, nil, - ) - descJobCreated = prometheus.NewDesc( - "kube_job_created", - "Unix creation timestamp", - []string{"namespace", "job"}, nil, - ) - descJobSpecParallelism = prometheus.NewDesc( - "kube_job_spec_parallelism", - "The maximum desired number of pods the job should run at any given time.", - []string{"namespace", "job"}, nil, - ) - descJobSpecCompletions = prometheus.NewDesc( - "kube_job_spec_completions", - "The desired number of successfully finished pods the job should be run with.", - []string{"namespace", "job"}, nil, - ) - descJobSpecActiveDeadlineSeconds = prometheus.NewDesc( - "kube_job_spec_active_deadline_seconds", - "The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.", - []string{"namespace", "job"}, nil, - ) - descJobStatusSucceeded = prometheus.NewDesc( - "kube_job_status_succeeded", - "The number of pods which reached Phase Succeeded.", - []string{"namespace", "job"}, nil, - ) - descJobStatusFailed = prometheus.NewDesc( - "kube_job_status_failed", - "The number of pods which reached Phase Failed.", - []string{"namespace", "job"}, nil, - ) - descJobStatusActive = prometheus.NewDesc( - "kube_job_status_active", - "The number of actively running pods.", - []string{"namespace", "job"}, nil, - ) - descJobConditionComplete = prometheus.NewDesc( - "kube_job_complete", - "The job has completed its execution.", - []string{"namespace", "job", "condition"}, nil, - ) - descJobConditionFailed = prometheus.NewDesc( - "kube_job_failed", - "The job has failed its execution.", - []string{"namespace", "job", "condition"}, nil, - ) - descJobStatusStartTime = prometheus.NewDesc( - "kube_job_status_start_time", - "StartTime represents time when the job was acknowledged by the Job Manager.", - []string{"namespace", "job"}, nil, - ) - descJobStatusCompletionTime = prometheus.NewDesc( - "kube_job_status_completion_time", - "CompletionTime represents time when the job was completed.", - []string{"namespace", "job"}, nil, - ) -) - -type JobLister func() ([]v1batch.Job, error) - -func (l JobLister) List() ([]v1batch.Job, error) { - return l() -} - -func RegisterJobCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.BatchV1().RESTClient() - glog.Infof("collect job with %s", client.APIVersion()) - - jinfs := NewSharedInformerList(client, "jobs", namespaces, &v1batch.Job{}) - - jobLister := JobLister(func() (jobs []v1batch.Job, err error) { - for _, jinf := range *jinfs { - for _, c := range jinf.GetStore().List() { - jobs = append(jobs, *(c.(*v1batch.Job))) - } - } - return jobs, nil - }) - - registry.MustRegister(&jobCollector{store: jobLister}) - jinfs.Run(context.Background().Done()) -} - -type jobStore interface { - List() (jobs []v1batch.Job, err error) -} - -// jobCollector collects metrics about all jobs in the cluster. -type jobCollector struct { - store jobStore -} - -// Describe implements the prometheus.Collector interface. 
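The collectJob function below hands job conditions to an addConditionMetrics helper (defined elsewhere in this package), which fans a single Kubernetes condition out into three series, condition="true"/"false"/"unknown", exactly one of which is 1; the job test further down shows this expansion for kube_job_complete and kube_job_failed. A standalone sketch of that expansion, not the helper's actual definition:

    package main

    import (
        "fmt"

        "k8s.io/api/core/v1"
    )

    func boolFloat64(b bool) float64 {
        if b {
            return 1
        }
        return 0
    }

    // conditionValues mirrors what addConditionMetrics plausibly emits:
    // one value per condition label, exactly one of which is 1.
    func conditionValues(cs v1.ConditionStatus) map[string]float64 {
        return map[string]float64{
            "true":    boolFloat64(cs == v1.ConditionTrue),
            "false":   boolFloat64(cs == v1.ConditionFalse),
            "unknown": boolFloat64(cs == v1.ConditionUnknown),
        }
    }

    func main() {
        fmt.Println(conditionValues(v1.ConditionTrue)) // map[false:0 true:1 unknown:0]
    }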
-func (dc *jobCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descJobInfo - ch <- descJobCreated - ch <- descJobLabels - ch <- descJobSpecParallelism - ch <- descJobSpecCompletions - ch <- descJobSpecActiveDeadlineSeconds - ch <- descJobStatusSucceeded - ch <- descJobStatusFailed - ch <- descJobStatusActive - ch <- descJobConditionComplete - ch <- descJobConditionFailed - ch <- descJobStatusStartTime - ch <- descJobStatusCompletionTime -} - -// Collect implements the prometheus.Collector interface. -func (jc *jobCollector) Collect(ch chan<- prometheus.Metric) { - jobs, err := jc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "job"}).Inc() - glog.Errorf("listing jobs failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "job"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "job"}).Observe(float64(len(jobs))) - for _, j := range jobs { - jc.collectJob(ch, j) - } - - glog.V(4).Infof("collected %d jobs", len(jobs)) -} - -func jobLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descJobLabelsName, - descJobLabelsHelp, - append(descJobLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (jc *jobCollector) collectJob(ch chan<- prometheus.Metric, j v1batch.Job) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{j.Namespace, j.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - addCounter := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{j.Namespace, j.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, lv...) - } - - addGauge(descJobInfo, 1) - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(j.Labels) - addGauge(jobLabelsDesc(labelKeys), 1, labelValues...) - - if j.Spec.Parallelism != nil { - addGauge(descJobSpecParallelism, float64(*j.Spec.Parallelism)) - } - - if j.Spec.Completions != nil { - addGauge(descJobSpecCompletions, float64(*j.Spec.Completions)) - } - if !j.CreationTimestamp.IsZero() { - addGauge(descJobCreated, float64(j.CreationTimestamp.Unix())) - } - - if j.Spec.ActiveDeadlineSeconds != nil { - addGauge(descJobSpecActiveDeadlineSeconds, float64(*j.Spec.ActiveDeadlineSeconds)) - } - - addGauge(descJobStatusSucceeded, float64(j.Status.Succeeded)) - addGauge(descJobStatusFailed, float64(j.Status.Failed)) - addGauge(descJobStatusActive, float64(j.Status.Active)) - - if j.Status.StartTime != nil { - addCounter(descJobStatusStartTime, float64(j.Status.StartTime.Unix())) - } - - if j.Status.CompletionTime != nil { - addCounter(descJobStatusCompletionTime, float64(j.Status.CompletionTime.Unix())) - } - - for _, c := range j.Status.Conditions { - switch c.Type { - case v1batch.JobComplete: - addConditionMetrics(ch, descJobConditionComplete, c.Status, j.Namespace, j.Name) - case v1batch.JobFailed: - addConditionMetrics(ch, descJobConditionFailed, c.Status, j.Namespace, j.Name) - } - } -} diff --git a/collectors/job_test.go b/collectors/job_test.go deleted file mode 100644 index 6f4c0a5844..0000000000 --- a/collectors/job_test.go +++ /dev/null @@ -1,263 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - v1batch "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - Parallelism1 int32 = 1 - Completions1 int32 = 1 - ActiveDeadlineSeconds900 int64 = 900 - - RunningJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:00:07Z") - SuccessfulJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:00:07Z") - FailedJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T14:00:07Z") - SuccessfulJob2StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:10:07Z") - - SuccessfulJob1CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T13:00:07Z") - FailedJob1CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T15:00:07Z") - SuccessfulJob2CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T13:10:07Z") -) - -type mockJobStore struct { - f func() ([]v1batch.Job, error) -} - -func (js mockJobStore) List() (jobs []v1batch.Job, err error) { - return js.f() -} - -func TestJobCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_job_created Unix creation timestamp - # TYPE kube_job_created gauge - # HELP kube_job_complete The job has completed its execution. - # TYPE kube_job_complete gauge - # HELP kube_job_failed The job has failed its execution. - # TYPE kube_job_failed gauge - # HELP kube_job_info Information about job. - # TYPE kube_job_info gauge - # HELP kube_job_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_job_labels gauge - # HELP kube_job_spec_active_deadline_seconds The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. - # TYPE kube_job_spec_active_deadline_seconds gauge - # HELP kube_job_spec_completions The desired number of successfully finished pods the job should be run with. - # TYPE kube_job_spec_completions gauge - # HELP kube_job_spec_parallelism The maximum desired number of pods the job should run at any given time. - # TYPE kube_job_spec_parallelism gauge - # HELP kube_job_status_active The number of actively running pods. - # TYPE kube_job_status_active gauge - # HELP kube_job_status_completion_time CompletionTime represents time when the job was completed. - # TYPE kube_job_status_completion_time counter - # HELP kube_job_status_failed The number of pods which reached Phase Failed. - # TYPE kube_job_status_failed gauge - # HELP kube_job_status_start_time StartTime represents time when the job was acknowledged by the Job Manager. - # TYPE kube_job_status_start_time counter - # HELP kube_job_status_succeeded The number of pods which reached Phase Succeeded. 
- # TYPE kube_job_status_succeeded gauge - ` - cases := []struct { - jobs []v1batch.Job - want string - }{ - { - jobs: []v1batch.Job{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "RunningJob1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-running-1", - }, - }, - Status: v1batch.JobStatus{ - Active: 1, - Failed: 0, - Succeeded: 0, - CompletionTime: nil, - StartTime: &metav1.Time{Time: RunningJob1StartTime}, - }, - Spec: v1batch.JobSpec{ - ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, - Parallelism: &Parallelism1, - Completions: &Completions1, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "SuccessfulJob1", - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-successful-1", - }, - }, - Status: v1batch.JobStatus{ - Active: 0, - Failed: 0, - Succeeded: 1, - CompletionTime: &metav1.Time{Time: SuccessfulJob1CompletionTime}, - StartTime: &metav1.Time{Time: SuccessfulJob1StartTime}, - Conditions: []v1batch.JobCondition{ - {Type: v1batch.JobComplete, Status: v1.ConditionTrue}, - }, - }, - Spec: v1batch.JobSpec{ - ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, - Parallelism: &Parallelism1, - Completions: &Completions1, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "FailedJob1", - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-failed-1", - }, - }, - Status: v1batch.JobStatus{ - Active: 0, - Failed: 1, - Succeeded: 0, - CompletionTime: &metav1.Time{Time: FailedJob1CompletionTime}, - StartTime: &metav1.Time{Time: FailedJob1StartTime}, - Conditions: []v1batch.JobCondition{ - {Type: v1batch.JobFailed, Status: v1.ConditionTrue}, - }, - }, - Spec: v1batch.JobSpec{ - ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, - Parallelism: &Parallelism1, - Completions: &Completions1, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "SuccessfulJob2NoActiveDeadlineSeconds", - Namespace: "ns1", - Generation: 1, - Labels: map[string]string{ - "app": "example-successful-2", - }, - }, - Status: v1batch.JobStatus{ - Active: 0, - Failed: 0, - Succeeded: 1, - CompletionTime: &metav1.Time{Time: SuccessfulJob2CompletionTime}, - StartTime: &metav1.Time{Time: SuccessfulJob2StartTime}, - Conditions: []v1batch.JobCondition{ - {Type: v1batch.JobComplete, Status: v1.ConditionTrue}, - }, - }, - Spec: v1batch.JobSpec{ - ActiveDeadlineSeconds: nil, - Parallelism: &Parallelism1, - Completions: &Completions1, - }, - }, - }, - want: metadata + ` - kube_job_created{job="RunningJob1",namespace="ns1"} 1.5e+09 - - kube_job_complete{condition="false",job="SuccessfulJob1",namespace="ns1"} 0 - kube_job_complete{condition="false",job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 - - kube_job_complete{condition="true",job="SuccessfulJob1",namespace="ns1"} 1 - kube_job_complete{condition="true",job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 - - kube_job_complete{condition="unknown",job="SuccessfulJob1",namespace="ns1"} 0 - kube_job_complete{condition="unknown",job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 - - kube_job_failed{condition="false",job="FailedJob1",namespace="ns1"} 0 - - kube_job_failed{condition="true",job="FailedJob1",namespace="ns1"} 1 - - kube_job_failed{condition="unknown",job="FailedJob1",namespace="ns1"} 0 - - kube_job_info{job="RunningJob1",namespace="ns1"} 1 - kube_job_info{job="SuccessfulJob1",namespace="ns1"} 1 - kube_job_info{job="FailedJob1",namespace="ns1"} 1 - 
kube_job_info{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 - - kube_job_labels{job="FailedJob1",label_app="example-failed-1",namespace="ns1"} 1 - kube_job_labels{job="RunningJob1",label_app="example-running-1",namespace="ns1"} 1 - kube_job_labels{job="SuccessfulJob1",label_app="example-successful-1",namespace="ns1"} 1 - kube_job_labels{job="SuccessfulJob2NoActiveDeadlineSeconds",label_app="example-successful-2",namespace="ns1"} 1 - - - kube_job_spec_active_deadline_seconds{job="RunningJob1",namespace="ns1"} 900 - kube_job_spec_active_deadline_seconds{job="SuccessfulJob1",namespace="ns1"} 900 - kube_job_spec_active_deadline_seconds{job="FailedJob1",namespace="ns1"} 900 - - kube_job_spec_completions{job="RunningJob1",namespace="ns1"} 1 - kube_job_spec_completions{job="SuccessfulJob1",namespace="ns1"} 1 - kube_job_spec_completions{job="FailedJob1",namespace="ns1"} 1 - kube_job_spec_completions{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 - - kube_job_spec_parallelism{job="RunningJob1",namespace="ns1"} 1 - kube_job_spec_parallelism{job="SuccessfulJob1",namespace="ns1"} 1 - kube_job_spec_parallelism{job="FailedJob1",namespace="ns1"} 1 - kube_job_spec_parallelism{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 - - kube_job_status_active{job="RunningJob1",namespace="ns1"} 1 - kube_job_status_active{job="SuccessfulJob1",namespace="ns1"} 0 - kube_job_status_active{job="FailedJob1",namespace="ns1"} 0 - kube_job_status_active{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 - - kube_job_status_completion_time{job="SuccessfulJob1",namespace="ns1"} 1.495803607e+09 - kube_job_status_completion_time{job="FailedJob1",namespace="ns1"} 1.495810807e+09 - kube_job_status_completion_time{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1.495804207e+09 - - kube_job_status_failed{job="RunningJob1",namespace="ns1"} 0 - kube_job_status_failed{job="SuccessfulJob1",namespace="ns1"} 0 - kube_job_status_failed{job="FailedJob1",namespace="ns1"} 1 - kube_job_status_failed{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 - - kube_job_status_start_time{job="RunningJob1",namespace="ns1"} 1.495800007e+09 - kube_job_status_start_time{job="SuccessfulJob1",namespace="ns1"} 1.495800007e+09 - kube_job_status_start_time{job="FailedJob1",namespace="ns1"} 1.495807207e+09 - kube_job_status_start_time{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1.495800607e+09 - - kube_job_status_succeeded{job="RunningJob1",namespace="ns1"} 0 - kube_job_status_succeeded{job="SuccessfulJob1",namespace="ns1"} 1 - kube_job_status_succeeded{job="FailedJob1",namespace="ns1"} 0 - kube_job_status_succeeded{job="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 - `, - }, - } - for _, c := range cases { - jc := &jobCollector{ - store: mockJobStore{ - f: func() ([]v1batch.Job, error) { return c.jobs, nil }, - }, - } - if err := gatherAndCompare(jc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/limitrange.go b/collectors/limitrange.go deleted file mode 100644 index c62cf72531..0000000000 --- a/collectors/limitrange.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descLimitRange = prometheus.NewDesc( - "kube_limitrange", - "Information about limit range.", - []string{ - "limitrange", - "namespace", - "resource", - "type", - "constraint", - }, nil, - ) - - descLimitRangeCreated = prometheus.NewDesc( - "kube_limitrange_created", - "Unix creation timestamp", - []string{"limitrange", "namespace"}, nil, - ) -) - -type LimitRangeLister func() (v1.LimitRangeList, error) - -func (l LimitRangeLister) List() (v1.LimitRangeList, error) { - return l() -} - -func RegisterLimitRangeCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect limitrange with %s", client.APIVersion()) - - rqinfs := NewSharedInformerList(client, "limitranges", namespaces, &v1.LimitRange{}) - - limitRangeLister := LimitRangeLister(func() (ranges v1.LimitRangeList, err error) { - for _, rqinf := range *rqinfs { - for _, rq := range rqinf.GetStore().List() { - ranges.Items = append(ranges.Items, *(rq.(*v1.LimitRange))) - } - } - return ranges, nil - }) - - registry.MustRegister(&limitRangeCollector{store: limitRangeLister}) - rqinfs.Run(context.Background().Done()) -} - -type limitRangeStore interface { - List() (v1.LimitRangeList, error) -} - -// limitRangeCollector collects metrics about all limit ranges in the cluster. -type limitRangeCollector struct { - store limitRangeStore -} - -// Describe implements the prometheus.Collector interface. -func (lrc *limitRangeCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descLimitRange - ch <- descLimitRangeCreated -} - -// Collect implements the prometheus.Collector interface. -func (lrc *limitRangeCollector) Collect(ch chan<- prometheus.Metric) { - limitRangeCollector, err := lrc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "limitrange"}).Inc() - glog.Errorf("listing limit ranges failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "limitrange"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "limitrange"}).Observe(float64(len(limitRangeCollector.Items))) - for _, rq := range limitRangeCollector.Items { - lrc.collectLimitRange(ch, rq) - } - - glog.V(4).Infof("collected %d limitranges", len(limitRangeCollector.Items)) -} - -func (lrc *limitRangeCollector) collectLimitRange(ch chan<- prometheus.Metric, rq v1.LimitRange) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{rq.Name, rq.Namespace}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) 
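// A minimal sketch of the gauge-helper pattern used throughout these
// collectors, assuming this file's prometheus and v1 imports; the function
// name is hypothetical. Each object gets a closure that prepends its
// identifying labels (name, namespace) to the metric-specific label values
// and emits an untimestamped const gauge on every scrape. Quantities are
// normalized via MilliValue()/1000, so a "500m" CPU limit surfaces as 0.5
// and a "2.1G" memory limit as 2.1e9.
func exampleAddLimitRangeGauge(ch chan<- prometheus.Metric, lr v1.LimitRange, desc *prometheus.Desc, value float64, lv ...string) {
    // Identifying labels first; resource/type/constraint label values follow.
    labelValues := append([]string{lr.Name, lr.Namespace}, lv...)
    ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value, labelValues...)
}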
- } - if !rq.CreationTimestamp.IsZero() { - addGauge(descLimitRangeCreated, float64(rq.CreationTimestamp.Unix())) - } - - rawLimitRanges := rq.Spec.Limits - for _, rawLimitRange := range rawLimitRanges { - for resource, min := range rawLimitRange.Min { - addGauge(descLimitRange, float64(min.MilliValue())/1000, string(resource), string(rawLimitRange.Type), "min") - } - - for resource, max := range rawLimitRange.Max { - addGauge(descLimitRange, float64(max.MilliValue())/1000, string(resource), string(rawLimitRange.Type), "max") - } - - for resource, df := range rawLimitRange.Default { - addGauge(descLimitRange, float64(df.MilliValue())/1000, string(resource), string(rawLimitRange.Type), "default") - } - - for resource, dfR := range rawLimitRange.DefaultRequest { - addGauge(descLimitRange, float64(dfR.MilliValue())/1000, string(resource), string(rawLimitRange.Type), "defaultRequest") - } - - for resource, mLR := range rawLimitRange.MaxLimitRequestRatio { - addGauge(descLimitRange, float64(mLR.MilliValue())/1000, string(resource), string(rawLimitRange.Type), "maxLimitRequestRatio") - } - - } - -} diff --git a/collectors/limitrange_test.go b/collectors/limitrange_test.go deleted file mode 100644 index 3dd92bc7c9..0000000000 --- a/collectors/limitrange_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockLimitRangeStore struct { - list func() (v1.LimitRangeList, error) -} - -func (ns mockLimitRangeStore) List() (v1.LimitRangeList, error) { - return ns.list() -} - -func TestLimitRangeCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - testMemory := "2.1G" - testMemoryQuantity := resource.MustParse(testMemory) - const metadata = ` - # HELP kube_limitrange_created Unix creation timestamp - # TYPE kube_limitrange_created gauge - # HELP kube_limitrange Information about limit range.
- # TYPE kube_limitrange gauge - ` - cases := []struct { - ranges []v1.LimitRange - metrics []string // which metrics should be checked - want string - }{ - { - ranges: []v1.LimitRange{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "quotaTest", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "testNS", - }, - Spec: v1.LimitRangeSpec{ - Limits: []v1.LimitRangeItem{ - { - Type: v1.LimitTypePod, - Max: map[v1.ResourceName]resource.Quantity{ - v1.ResourceMemory: testMemoryQuantity, - }, - Min: map[v1.ResourceName]resource.Quantity{ - v1.ResourceMemory: testMemoryQuantity, - }, - Default: map[v1.ResourceName]resource.Quantity{ - v1.ResourceMemory: testMemoryQuantity, - }, - DefaultRequest: map[v1.ResourceName]resource.Quantity{ - v1.ResourceMemory: testMemoryQuantity, - }, - MaxLimitRequestRatio: map[v1.ResourceName]resource.Quantity{ - v1.ResourceMemory: testMemoryQuantity, - }, - }, - }, - }, - }, - }, - want: metadata + ` - kube_limitrange_created{limitrange="quotaTest",namespace="testNS"} 1.5e+09 - kube_limitrange{limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod",constraint="min"} 2.1e+09 - kube_limitrange{limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod",constraint="max"} 2.1e+09 - kube_limitrange{limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod",constraint="default"} 2.1e+09 - kube_limitrange{limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod",constraint="defaultRequest"} 2.1e+09 - kube_limitrange{limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod",constraint="maxLimitRequestRatio"} 2.1e+09 - `, - }, - } - for _, c := range cases { - dc := &limitRangeCollector{ - store: &mockLimitRangeStore{ - list: func() (v1.LimitRangeList, error) { - return v1.LimitRangeList{Items: c.ranges}, nil - }, - }, - } - if err := gatherAndCompare(dc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/namespace.go b/collectors/namespace.go deleted file mode 100644 index 0e3cf3c1b1..0000000000 --- a/collectors/namespace.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descNamespaceLabelsName = "kube_namespace_labels" - descNamespaceLabelsHelp = "Kubernetes labels converted to Prometheus labels." - descNamespaceLabelsDefaultLabels = []string{"namespace"} - - descNamespaceAnnotationsName = "kube_namespace_annotations" - descNamespaceAnnotationsHelp = "Kubernetes annotations converted to Prometheus labels." 
- descNamespaceAnnotationsDefaultLabels = []string{"namespace"} - - descNamespaceCreated = prometheus.NewDesc( - "kube_namespace_created", - "Unix creation timestamp", - []string{"namespace"}, nil, - ) - - descNamespaceLabels = prometheus.NewDesc( - descNamespaceLabelsName, - descNamespaceLabelsHelp, - descNamespaceLabelsDefaultLabels, nil, - ) - - descNamespaceAnnotations = prometheus.NewDesc( - descNamespaceAnnotationsName, - descNamespaceAnnotationsHelp, - descNamespaceAnnotationsDefaultLabels, nil, - ) - - descNamespacePhase = prometheus.NewDesc( - "kube_namespace_status_phase", - "kubernetes namespace status phase.", - []string{ - "namespace", - "phase", - }, nil, - ) -) - -// NamespaceLister define NamespaceLister type -type NamespaceLister func() ([]v1.Namespace, error) - -// List return namespace list -func (l NamespaceLister) List() ([]v1.Namespace, error) { - return l() -} - -// RegisterNamespaceCollector registry namespace collector -func RegisterNamespaceCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect namespace with %s", client.APIVersion()) - - nsinfs := NewSharedInformerList(client, "namespaces", []string{metav1.NamespaceAll}, &v1.Namespace{}) - - namespaceLister := NamespaceLister(func() (namespaces []v1.Namespace, err error) { - for _, nsinf := range *nsinfs { - for _, ns := range nsinf.GetStore().List() { - namespaces = append(namespaces, *(ns.(*v1.Namespace))) - } - } - return namespaces, nil - }) - - registry.MustRegister(&namespaceCollector{store: namespaceLister}) - nsinfs.Run(context.Background().Done()) -} - -type namespaceStore interface { - List() ([]v1.Namespace, error) -} - -// namespaceCollector collects metrics about all namespace in the cluster. -type namespaceCollector struct { - store namespaceStore -} - -// Describe implements the prometheus.Collector interface. -func (nsc *namespaceCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descNamespaceCreated - ch <- descNamespaceLabels - ch <- descNamespaceAnnotations - ch <- descNamespacePhase -} - -// Collect implements the prometheus.Collector interface. -func (nsc *namespaceCollector) Collect(ch chan<- prometheus.Metric) { - nsls, err := nsc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "namespace"}).Inc() - glog.Errorf("listing namespace failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "namespace"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "namespace"}).Observe(float64(len(nsls))) - for _, rq := range nsls { - nsc.collectNamespace(ch, rq) - } - - glog.V(4).Infof("collected %d namespaces", len(nsls)) -} - -func (nsc *namespaceCollector) collectNamespace(ch chan<- prometheus.Metric, ns v1.Namespace) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{ns.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - - addGauge(descNamespacePhase, boolFloat64(ns.Status.Phase == v1.NamespaceActive), string(v1.NamespaceActive)) - addGauge(descNamespacePhase, boolFloat64(ns.Status.Phase == v1.NamespaceTerminating), string(v1.NamespaceTerminating)) - - if !ns.CreationTimestamp.IsZero() { - addGauge(descNamespaceCreated, float64(ns.CreationTimestamp.Unix())) - } - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(ns.Labels) - addGauge(namespaceLabelsDesc(labelKeys), 1, labelValues...) 
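// A short sketch of the dynamic-descriptor technique behind
// kube_namespace_labels and kube_namespace_annotations, assuming this file's
// imports and the package's kubeLabelsToPrometheusLabels helper; the function
// name is hypothetical. Label keys differ per object, so a fresh
// prometheus.Desc is built from the sanitized keys on each scrape and the
// sample value is a constant 1; the information is carried entirely by the
// labels.
func exampleNamespaceLabelsMetric(ch chan<- prometheus.Metric, ns v1.Namespace) {
    labelKeys, labelValues := kubeLabelsToPrometheusLabels(ns.Labels) // e.g. "app" becomes "label_app"
    desc := prometheus.NewDesc(
        descNamespaceLabelsName,
        descNamespaceLabelsHelp,
        append(descNamespaceLabelsDefaultLabels, labelKeys...),
        nil,
    )
    ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1,
        append([]string{ns.Name}, labelValues...)...)
}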
- - annotationKeys, annotationValues := kubeAnnotationsToPrometheusAnnotations(ns.Annotations) - addGauge(namespaceAnnotationsDesc(annotationKeys), 1, annotationValues...) -} - -func namespaceLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descNamespaceLabelsName, - descNamespaceLabelsHelp, - append(descNamespaceLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func namespaceAnnotationsDesc(annotationKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descNamespaceAnnotationsName, - descNamespaceAnnotationsHelp, - append(descNamespaceAnnotationsDefaultLabels, annotationKeys...), - nil, - ) -} diff --git a/collectors/namespace_test.go b/collectors/namespace_test.go deleted file mode 100644 index 542ee7e082..0000000000 --- a/collectors/namespace_test.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockNamespaceStore struct { - list func() ([]v1.Namespace, error) -} - -func (ns mockNamespaceStore) List() ([]v1.Namespace, error) { - return ns.list() -} - -func TestNamespaceCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_namespace_created Unix creation timestamp - # TYPE kube_namespace_created gauge - # HELP kube_namespace_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_namespace_labels gauge - # HELP kube_namespace_annotations Kubernetes annotations converted to Prometheus labels. - # TYPE kube_namespace_annotations gauge - # HELP kube_namespace_status_phase kubernetes namespace status phase.
- # TYPE kube_namespace_status_phase gauge - ` - - cases := []struct { - ns []v1.Namespace - metrics []string // which metrics should be checked - want string - }{ - { - ns: []v1.Namespace{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "nsActiveTest", - }, - Spec: v1.NamespaceSpec{ - Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, - }, - Status: v1.NamespaceStatus{ - Phase: v1.NamespaceActive, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "nsTerminateTest", - }, - Spec: v1.NamespaceSpec{ - Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, - }, - Status: v1.NamespaceStatus{ - Phase: v1.NamespaceTerminating, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "ns1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Labels: map[string]string{ - "app": "example1", - }, - Annotations: map[string]string{ - "app": "example1", - }, - }, - Spec: v1.NamespaceSpec{ - Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, - }, - Status: v1.NamespaceStatus{ - Phase: v1.NamespaceActive, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "ns2", - Labels: map[string]string{ - "app": "example2", - "l2": "label2", - }, - Annotations: map[string]string{ - "app": "example2", - "l2": "label2", - }, - }, - Spec: v1.NamespaceSpec{ - Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, - }, - Status: v1.NamespaceStatus{ - Phase: v1.NamespaceActive, - }, - }, - }, - - want: metadata + ` - kube_namespace_created{namespace="ns1"} 1.5e+09 - kube_namespace_labels{label_app="example1",namespace="ns1"} 1 - kube_namespace_labels{label_app="example2",label_l2="label2",namespace="ns2"} 1 - kube_namespace_labels{namespace="nsActiveTest"} 1 - kube_namespace_labels{namespace="nsTerminateTest"} 1 - kube_namespace_annotations{annotation_app="example1",namespace="ns1"} 1 - kube_namespace_annotations{annotation_app="example2",annotation_l2="label2",namespace="ns2"} 1 - kube_namespace_annotations{namespace="nsActiveTest"} 1 - kube_namespace_annotations{namespace="nsTerminateTest"} 1 - kube_namespace_status_phase{namespace="ns1",phase="Active"} 1 - kube_namespace_status_phase{namespace="ns1",phase="Terminating"} 0 - kube_namespace_status_phase{namespace="ns2",phase="Active"} 1 - kube_namespace_status_phase{namespace="ns2",phase="Terminating"} 0 - kube_namespace_status_phase{namespace="nsActiveTest",phase="Active"} 1 - kube_namespace_status_phase{namespace="nsActiveTest",phase="Terminating"} 0 - kube_namespace_status_phase{namespace="nsTerminateTest",phase="Active"} 0 - kube_namespace_status_phase{namespace="nsTerminateTest",phase="Terminating"} 1 - `, - }, - } - for _, c := range cases { - nsc := &namespaceCollector{ - store: mockNamespaceStore{ - list: func() ([]v1.Namespace, error) { return c.ns, nil }, - }, - } - if err := gatherAndCompare(nsc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/node.go b/collectors/node.go deleted file mode 100644 index 08c1e78398..0000000000 --- a/collectors/node.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descNodeLabelsName = "kube_node_labels" - descNodeLabelsHelp = "Kubernetes labels converted to Prometheus labels." - descNodeLabelsDefaultLabels = []string{"node"} - - descNodeInfo = prometheus.NewDesc( - "kube_node_info", - "Information about a cluster node.", - []string{ - "node", - "kernel_version", - "os_image", - "container_runtime_version", - "kubelet_version", - "kubeproxy_version", - "provider_id", - }, nil, - ) - - descNodeCreated = prometheus.NewDesc( - "kube_node_created", - "Unix creation timestamp", - []string{"node"}, nil, - ) - - descNodeLabels = prometheus.NewDesc( - descNodeLabelsName, - descNodeLabelsHelp, - descNodeLabelsDefaultLabels, nil, - ) - - descNodeSpecUnschedulable = prometheus.NewDesc( - "kube_node_spec_unschedulable", - "Whether a node can schedule new pods.", - []string{"node"}, nil, - ) - - descNodeSpecTaint = prometheus.NewDesc( - "kube_node_spec_taint", - "The taint of a cluster node.", - []string{"node", "key", "value", "effect"}, nil, - ) - - descNodeStatusCondition = prometheus.NewDesc( - "kube_node_status_condition", - "The condition of a cluster node.", - []string{"node", "condition", "status"}, nil, - ) - - descNodeStatusPhase = prometheus.NewDesc( - "kube_node_status_phase", - "The phase the node is currently in.", - []string{"node", "phase"}, nil, - ) - - descNodeStatusCapacityPods = prometheus.NewDesc( - "kube_node_status_capacity_pods", - "The total pod resources of the node.", - []string{"node"}, nil, - ) - descNodeStatusCapacityCPU = prometheus.NewDesc( - "kube_node_status_capacity_cpu_cores", - "The total CPU resources of the node.", - []string{"node"}, nil, - ) - descNodeStatusCapacityNvidiaGPU = prometheus.NewDesc( - "kube_node_status_capacity_nvidia_gpu_cards", - "The total Nvidia GPU resources of the node.", - []string{"node"}, nil, - ) - descNodeStatusCapacityMemory = prometheus.NewDesc( - "kube_node_status_capacity_memory_bytes", - "The total memory resources of the node.", - []string{"node"}, nil, - ) - - descNodeStatusAllocatablePods = prometheus.NewDesc( - "kube_node_status_allocatable_pods", - "The pod resources of a node that are available for scheduling.", - []string{"node"}, nil, - ) - descNodeStatusAllocatableCPU = prometheus.NewDesc( - "kube_node_status_allocatable_cpu_cores", - "The CPU resources of a node that are available for scheduling.", - []string{"node"}, nil, - ) - descNodeStatusAllocatableNvidiaGPU = prometheus.NewDesc( - "kube_node_status_allocatable_nvidia_gpu_cards", - "The Nvidia GPU resources of a node that are available for scheduling.", - []string{"node"}, nil, - ) - descNodeStatusAllocatableMemory = prometheus.NewDesc( - "kube_node_status_allocatable_memory_bytes", - "The memory resources of a node that are available for scheduling.", - []string{"node"}, nil, - ) -) - -type NodeLister func() (v1.NodeList, error) - -func (l NodeLister) List() (v1.NodeList, 
error) { - return l() -} - -func RegisterNodeCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect node with %s", client.APIVersion()) - - ninfs := NewSharedInformerList(client, "nodes", []string{metav1.NamespaceAll}, &v1.Node{}) - - nodeLister := NodeLister(func() (machines v1.NodeList, err error) { - for _, ninf := range *ninfs { - for _, m := range ninf.GetStore().List() { - machines.Items = append(machines.Items, *(m.(*v1.Node))) - } - } - return machines, nil - }) - - registry.MustRegister(&nodeCollector{store: nodeLister}) - ninfs.Run(context.Background().Done()) -} - -type nodeStore interface { - List() (v1.NodeList, error) -} - -// nodeCollector collects metrics about all nodes in the cluster. -type nodeCollector struct { - store nodeStore -} - -// Describe implements the prometheus.Collector interface. -func (nc *nodeCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descNodeInfo - ch <- descNodeCreated - ch <- descNodeLabels - ch <- descNodeSpecUnschedulable - ch <- descNodeSpecTaint - ch <- descNodeStatusCondition - ch <- descNodeStatusPhase - ch <- descNodeStatusCapacityCPU - ch <- descNodeStatusCapacityNvidiaGPU - ch <- descNodeStatusCapacityMemory - ch <- descNodeStatusCapacityPods - ch <- descNodeStatusAllocatableCPU - ch <- descNodeStatusAllocatableNvidiaGPU - ch <- descNodeStatusAllocatableMemory - ch <- descNodeStatusAllocatablePods -} - -// Collect implements the prometheus.Collector interface. -func (nc *nodeCollector) Collect(ch chan<- prometheus.Metric) { - nodes, err := nc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "node"}).Inc() - glog.Errorf("listing nodes failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "node"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "node"}).Observe(float64(len(nodes.Items))) - for _, n := range nodes.Items { - nc.collectNode(ch, n) - } - - glog.V(4).Infof("collected %d nodes", len(nodes.Items)) -} - -func nodeLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descNodeLabelsName, - descNodeLabelsHelp, - append(descNodeLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (nc *nodeCollector) collectNode(ch chan<- prometheus.Metric, n v1.Node) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{n.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - // NOTE: the instrumentation API requires providing label values in order of declaration - // in the metric descriptor. Be careful when making modifications. - addGauge(descNodeInfo, 1, - n.Status.NodeInfo.KernelVersion, - n.Status.NodeInfo.OSImage, - n.Status.NodeInfo.ContainerRuntimeVersion, - n.Status.NodeInfo.KubeletVersion, - n.Status.NodeInfo.KubeProxyVersion, - n.Spec.ProviderID, - ) - if !n.CreationTimestamp.IsZero() { - addGauge(descNodeCreated, float64(n.CreationTimestamp.Unix())) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(n.Labels) - addGauge(nodeLabelsDesc(labelKeys), 1, labelValues...) - - addGauge(descNodeSpecUnschedulable, boolFloat64(n.Spec.Unschedulable)) - - // Collect node taints - for _, taint := range n.Spec.Taints { - // Taints are applied to repel pods from nodes that do not have a corresponding - // toleration. 
Many node conditions are optionally reflected as taints - // by the node controller in order to simplify scheduling constraints. - addGauge(descNodeSpecTaint, 1, taint.Key, taint.Value, string(taint.Effect)) - } - - // Collect node conditions; all possible statuses are emitted, with non-matching ones defaulting to false. - for _, c := range n.Status.Conditions { - // This all-in-one metric family contains all conditions for extensibility. - // Third-party plugins may report customized conditions for cluster nodes - // (e.g. node-problem-detector), and Kubernetes may add new core - // conditions in the future. - addConditionMetrics(ch, descNodeStatusCondition, c.Status, n.Name, string(c.Type)) - } - - // Set current phase to 1, others to 0 if it is set. - if p := n.Status.Phase; p != "" { - addGauge(descNodeStatusPhase, boolFloat64(p == v1.NodePending), string(v1.NodePending)) - addGauge(descNodeStatusPhase, boolFloat64(p == v1.NodeRunning), string(v1.NodeRunning)) - addGauge(descNodeStatusPhase, boolFloat64(p == v1.NodeTerminated), string(v1.NodeTerminated)) - } - - // Add capacity and allocatable resources if they are set. - addResource := func(d *prometheus.Desc, res v1.ResourceList, n v1.ResourceName) { - if v, ok := res[n]; ok { - addGauge(d, float64(v.MilliValue())/1000) - } - } - addResource(descNodeStatusCapacityCPU, n.Status.Capacity, v1.ResourceCPU) - addResource(descNodeStatusCapacityNvidiaGPU, n.Status.Capacity, v1.ResourceNvidiaGPU) - addResource(descNodeStatusCapacityMemory, n.Status.Capacity, v1.ResourceMemory) - addResource(descNodeStatusCapacityPods, n.Status.Capacity, v1.ResourcePods) - - addResource(descNodeStatusAllocatableCPU, n.Status.Allocatable, v1.ResourceCPU) - addResource(descNodeStatusAllocatableNvidiaGPU, n.Status.Allocatable, v1.ResourceNvidiaGPU) - addResource(descNodeStatusAllocatableMemory, n.Status.Allocatable, v1.ResourceMemory) - addResource(descNodeStatusAllocatablePods, n.Status.Allocatable, v1.ResourcePods) -} - -// addConditionMetrics generates one metric for each possible node condition -// status. For this function to work properly, the last label in the metric -// description must be the condition. -func addConditionMetrics(ch chan<- prometheus.Metric, desc *prometheus.Desc, cs v1.ConditionStatus, lv ...string) { - ch <- prometheus.MustNewConstMetric( - desc, prometheus.GaugeValue, boolFloat64(cs == v1.ConditionTrue), - append(lv, "true")..., - ) - ch <- prometheus.MustNewConstMetric( - desc, prometheus.GaugeValue, boolFloat64(cs == v1.ConditionFalse), - append(lv, "false")..., - ) - ch <- prometheus.MustNewConstMetric( - desc, prometheus.GaugeValue, boolFloat64(cs == v1.ConditionUnknown), - append(lv, "unknown")..., - ) -} - -func boolFloat64(b bool) float64 { - if b { - return 1 - } - return 0 -} diff --git a/collectors/node_test.go b/collectors/node_test.go deleted file mode 100644 index a4eff23563..0000000000 --- a/collectors/node_test.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockNodeStore struct { - list func() (v1.NodeList, error) -} - -func (ns mockNodeStore) List() (v1.NodeList, error) { - return ns.list() -} - -func TestNodeCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_node_created Unix creation timestamp - # TYPE kube_node_created gauge - # HELP kube_node_info Information about a cluster node. - # TYPE kube_node_info gauge - # HELP kube_node_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_node_labels gauge - # HELP kube_node_spec_unschedulable Whether a node can schedule new pods. - # TYPE kube_node_spec_unschedulable gauge - # HELP kube_node_spec_taint The taint of a cluster node. - # TYPE kube_node_spec_taint gauge - # TYPE kube_node_status_phase gauge - # HELP kube_node_status_phase The phase the node is currently in. - # TYPE kube_node_status_capacity_pods gauge - # HELP kube_node_status_capacity_pods The total pod resources of the node. - # TYPE kube_node_status_capacity_cpu_cores gauge - # HELP kube_node_status_capacity_cpu_cores The total CPU resources of the node. - # HELP kube_node_status_capacity_nvidia_gpu_cards The total Nvidia GPU resources of the node. - # TYPE kube_node_status_capacity_nvidia_gpu_cards gauge - # TYPE kube_node_status_capacity_memory_bytes gauge - # HELP kube_node_status_capacity_memory_bytes The total memory resources of the node. - # TYPE kube_node_status_allocatable_pods gauge - # HELP kube_node_status_allocatable_pods The pod resources of a node that are available for scheduling. - # TYPE kube_node_status_allocatable_cpu_cores gauge - # HELP kube_node_status_allocatable_cpu_cores The CPU resources of a node that are available for scheduling. - # HELP kube_node_status_allocatable_nvidia_gpu_cards The Nvidia GPU resources of a node that are available for scheduling. - # TYPE kube_node_status_allocatable_nvidia_gpu_cards gauge - # TYPE kube_node_status_allocatable_memory_bytes gauge - # HELP kube_node_status_allocatable_memory_bytes The memory resources of a node that are available for scheduling. - # HELP kube_node_status_condition The condition of a cluster node. - # TYPE kube_node_status_condition gauge - ` - cases := []struct { - nodes []v1.Node - metrics []string // which metrics should be checked - want string - }{ - // Verify populating base metrics and that metrics for unset fields are skipped. - { - nodes: []v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.1", - }, - Status: v1.NodeStatus{ - NodeInfo: v1.NodeSystemInfo{ - KernelVersion: "kernel", - KubeletVersion: "kubelet", - KubeProxyVersion: "kubeproxy", - OSImage: "osimage", - ContainerRuntimeVersion: "rkt", - }, - }, - Spec: v1.NodeSpec{ - ProviderID: "provider://i-uniqueid", - }, - }, - }, - want: metadata + ` - kube_node_info{container_runtime_version="rkt",kernel_version="kernel",kubelet_version="kubelet",kubeproxy_version="kubeproxy",node="127.0.0.1",os_image="osimage",provider_id="provider://i-uniqueid"} 1 - kube_node_labels{node="127.0.0.1"} 1 - kube_node_spec_unschedulable{node="127.0.0.1"} 0 - `, - }, - // Verify resource metrics. 
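// The next case exercises the conversions sketched here, assuming this
// file's k8s.io/apimachinery/pkg/api/resource import; the function name is
// hypothetical. Capacity and allocatable quantities are exported as
// MilliValue()/1000, so fractional CPU counts survive and SI suffixes expand
// to base units.
func exampleQuantityValues() (cpuCores, memoryBytes float64) {
    cpuQ := resource.MustParse("4.3") // 4300 milli-cores
    memQ := resource.MustParse("2G")  // 2 * 10^9 bytes
    return float64(cpuQ.MilliValue()) / 1000, float64(memQ.MilliValue()) / 1000 // 4.3 and 2e9
}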
- { - nodes: []v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Labels: map[string]string{ - "type": "master", - }, - }, - Spec: v1.NodeSpec{ - Unschedulable: true, - ProviderID: "provider://i-randomidentifier", - }, - Status: v1.NodeStatus{ - NodeInfo: v1.NodeSystemInfo{ - KernelVersion: "kernel", - KubeletVersion: "kubelet", - KubeProxyVersion: "kubeproxy", - OSImage: "osimage", - ContainerRuntimeVersion: "rkt", - }, - Capacity: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("4.3"), - v1.ResourceNvidiaGPU: resource.MustParse("4"), - v1.ResourceMemory: resource.MustParse("2G"), - v1.ResourcePods: resource.MustParse("1000"), - }, - Allocatable: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), - v1.ResourceNvidiaGPU: resource.MustParse("2"), - v1.ResourceMemory: resource.MustParse("1G"), - v1.ResourcePods: resource.MustParse("555"), - }, - }, - }, - }, - want: metadata + ` - kube_node_created{node="127.0.0.1"} 1.5e+09 - kube_node_info{container_runtime_version="rkt",kernel_version="kernel",kubelet_version="kubelet",kubeproxy_version="kubeproxy",node="127.0.0.1",os_image="osimage",provider_id="provider://i-randomidentifier"} 1 - kube_node_labels{label_type="master",node="127.0.0.1"} 1 - kube_node_spec_unschedulable{node="127.0.0.1"} 1 - kube_node_status_capacity_cpu_cores{node="127.0.0.1"} 4.3 - kube_node_status_capacity_nvidia_gpu_cards{node="127.0.0.1"} 4 - kube_node_status_capacity_memory_bytes{node="127.0.0.1"} 2e9 - kube_node_status_capacity_pods{node="127.0.0.1"} 1000 - kube_node_status_allocatable_cpu_cores{node="127.0.0.1"} 3 - kube_node_status_allocatable_nvidia_gpu_cards{node="127.0.0.1"} 2 - kube_node_status_allocatable_memory_bytes{node="127.0.0.1"} 1e9 - kube_node_status_allocatable_pods{node="127.0.0.1"} 555 - `, - }, - // Verify phase enumerations. 
- { - nodes: []v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.1", - }, - Status: v1.NodeStatus{ - Phase: v1.NodeRunning, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.2", - }, - Status: v1.NodeStatus{ - Phase: v1.NodePending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.3", - }, - Status: v1.NodeStatus{ - Phase: v1.NodeTerminated, - }, - }, - }, - want: metadata + ` - kube_node_status_phase{node="127.0.0.1",phase="Terminated"} 0 - kube_node_status_phase{node="127.0.0.1",phase="Running"} 1 - kube_node_status_phase{node="127.0.0.1",phase="Pending"} 0 - kube_node_status_phase{node="127.0.0.2",phase="Terminated"} 0 - kube_node_status_phase{node="127.0.0.2",phase="Running"} 0 - kube_node_status_phase{node="127.0.0.2",phase="Pending"} 1 - kube_node_status_phase{node="127.0.0.3",phase="Terminated"} 1 - kube_node_status_phase{node="127.0.0.3",phase="Running"} 0 - kube_node_status_phase{node="127.0.0.3",phase="Pending"} 0 - `, - metrics: []string{"kube_node_status_phase"}, - }, - // Verify StatusCondition - { - nodes: []v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.1", - }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, - {Type: v1.NodeReady, Status: v1.ConditionTrue}, - {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionTrue}, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.2", - }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionUnknown}, - {Type: v1.NodeReady, Status: v1.ConditionUnknown}, - {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionUnknown}, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.3", - }, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse}, - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionFalse}, - }, - }, - }, - }, - want: metadata + ` - kube_node_status_condition{node="127.0.0.1",condition="NetworkUnavailable",status="true"} 1 - kube_node_status_condition{node="127.0.0.1",condition="NetworkUnavailable",status="false"} 0 - kube_node_status_condition{node="127.0.0.1",condition="NetworkUnavailable",status="unknown"} 0 - kube_node_status_condition{node="127.0.0.2",condition="NetworkUnavailable",status="true"} 0 - kube_node_status_condition{node="127.0.0.2",condition="NetworkUnavailable",status="false"} 0 - kube_node_status_condition{node="127.0.0.2",condition="NetworkUnavailable",status="unknown"} 1 - kube_node_status_condition{node="127.0.0.3",condition="NetworkUnavailable",status="true"} 0 - kube_node_status_condition{node="127.0.0.3",condition="NetworkUnavailable",status="false"} 1 - kube_node_status_condition{node="127.0.0.3",condition="NetworkUnavailable",status="unknown"} 0 - kube_node_status_condition{node="127.0.0.1",condition="Ready",status="true"} 1 - kube_node_status_condition{node="127.0.0.1",condition="Ready",status="false"} 0 - kube_node_status_condition{node="127.0.0.1",condition="Ready",status="unknown"} 0 - kube_node_status_condition{node="127.0.0.2",condition="Ready",status="true"} 0 - kube_node_status_condition{node="127.0.0.2",condition="Ready",status="false"} 0 - kube_node_status_condition{node="127.0.0.2",condition="Ready",status="unknown"} 1 - kube_node_status_condition{node="127.0.0.3",condition="Ready",status="true"} 0 - 
kube_node_status_condition{node="127.0.0.3",condition="Ready",status="false"} 1 - kube_node_status_condition{node="127.0.0.3",condition="Ready",status="unknown"} 0 - kube_node_status_condition{node="127.0.0.1",condition="CustomizedType",status="true"} 1 - kube_node_status_condition{node="127.0.0.1",condition="CustomizedType",status="false"} 0 - kube_node_status_condition{node="127.0.0.1",condition="CustomizedType",status="unknown"} 0 - kube_node_status_condition{node="127.0.0.2",condition="CustomizedType",status="true"} 0 - kube_node_status_condition{node="127.0.0.2",condition="CustomizedType",status="false"} 0 - kube_node_status_condition{node="127.0.0.2",condition="CustomizedType",status="unknown"} 1 - kube_node_status_condition{node="127.0.0.3",condition="CustomizedType",status="true"} 0 - kube_node_status_condition{node="127.0.0.3",condition="CustomizedType",status="false"} 1 - kube_node_status_condition{node="127.0.0.3",condition="CustomizedType",status="unknown"} 0 - `, - metrics: []string{"kube_node_status_condition"}, - }, - // Verify SpecTaints - { - nodes: []v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "127.0.0.1", - }, - Spec: v1.NodeSpec{ - Taints: []v1.Taint{ - {Key: "node.kubernetes.io/memory-pressure", Value: "true", Effect: v1.TaintEffectPreferNoSchedule}, - {Key: "Accelerated", Value: "gpu", Effect: v1.TaintEffectPreferNoSchedule}, - {Key: "Dedicated", Effect: v1.TaintEffectPreferNoSchedule}, - }, - }, - }, - }, - want: metadata + ` - kube_node_spec_taint{effect="PreferNoSchedule",key="Dedicated",node="127.0.0.1",value=""} 1 - kube_node_spec_taint{effect="PreferNoSchedule",key="Accelerated",node="127.0.0.1",value="gpu"} 1 - kube_node_spec_taint{effect="PreferNoSchedule",key="node.kubernetes.io/memory-pressure",node="127.0.0.1",value="true"} 1 - `, - metrics: []string{"kube_node_spec_taint"}, - }, - } - for _, c := range cases { - dc := &nodeCollector{ - store: &mockNodeStore{ - list: func() (v1.NodeList, error) { - return v1.NodeList{Items: c.nodes}, nil - }, - }, - } - if err := gatherAndCompare(dc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/persistentvolume.go b/collectors/persistentvolume.go deleted file mode 100644 index e4478d0ce0..0000000000 --- a/collectors/persistentvolume.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descPersistentVolumeLabelsName = "kube_persistentvolume_labels" - descPersistentVolumeLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descPersistentVolumeLabelsDefaultLabels = []string{"persistentvolume"} - - descPersistentVolumeLabels = prometheus.NewDesc( - descPersistentVolumeLabelsName, - descPersistentVolumeLabelsHelp, - descPersistentVolumeLabelsDefaultLabels, nil, - ) - - descPersistentVolumeStatusPhase = prometheus.NewDesc( - "kube_persistentvolume_status_phase", - "The phase indicates if a volume is available, bound to a claim, or released by a claim.", - []string{ - "persistentvolume", - "phase", - }, nil, - ) - - descPersistentVolumeInfo = prometheus.NewDesc( - "kube_persistentvolume_info", - "Information about persistentvolume.", - []string{"persistentvolume", "storageclass"}, nil, - ) -) - -type PersistentVolumeLister func() (v1.PersistentVolumeList, error) - -func (pvl PersistentVolumeLister) List() (v1.PersistentVolumeList, error) { - return pvl() -} - -func RegisterPersistentVolumeCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect persistentvolume with %s", client.APIVersion()) - - pvinfs := NewSharedInformerList(client, "persistentvolumes", []string{v1.NamespaceAll}, &v1.PersistentVolume{}) - - persistentVolumeLister := PersistentVolumeLister(func() (pvs v1.PersistentVolumeList, err error) { - for _, pvinf := range *pvinfs { - for _, pv := range pvinf.GetStore().List() { - pvs.Items = append(pvs.Items, *(pv.(*v1.PersistentVolume))) - } - } - return pvs, nil - }) - - registry.MustRegister(&persistentVolumeCollector{store: persistentVolumeLister}) - pvinfs.Run(context.Background().Done()) -} - -type persistentVolumeStore interface { - List() (v1.PersistentVolumeList, error) -} - -// persistentVolumeCollector collects metrics about all persistentVolumes in the cluster. -type persistentVolumeCollector struct { - store persistentVolumeStore -} - -// Describe implements the prometheus.Collector interface. -func (collector *persistentVolumeCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descPersistentVolumeStatusPhase - ch <- descPersistentVolumeInfo - ch <- descPersistentVolumeLabels -} - -func persistentVolumeLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descPersistentVolumeLabelsName, - descPersistentVolumeLabelsHelp, - append(descPersistentVolumeLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -// Collect implements the prometheus.Collector interface. -func (collector *persistentVolumeCollector) Collect(ch chan<- prometheus.Metric) { - persistentVolumeCollector, err := collector.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "persistentvolume"}).Inc() - glog.Errorf("listing persistentVolume failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "persistentvolume"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "persistentvolume"}).Observe(float64(len(persistentVolumeCollector.Items))) - for _, pv := range persistentVolumeCollector.Items { - collector.collectPersistentVolume(ch, pv) - } - - glog.V(4).Infof("collected %d persistentvolumes", len(persistentVolumeCollector.Items)) -} - -func (collector *persistentVolumeCollector) collectPersistentVolume(ch chan<- prometheus.Metric, pv v1.PersistentVolume) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{pv.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) 
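// A compact sketch of the one-hot phase encoding applied below (and by the
// namespace and node collectors), assuming this file's imports plus the
// package's boolFloat64 helper; the function name is hypothetical. Every
// known phase gets a sample, the current phase reads 1 and all others 0, so
// PromQL can filter on the phase label without gaps in the series.
func examplePhaseGauges(ch chan<- prometheus.Metric, pv v1.PersistentVolume) {
    for _, phase := range []v1.PersistentVolumePhase{
        v1.VolumePending, v1.VolumeAvailable, v1.VolumeBound, v1.VolumeReleased, v1.VolumeFailed,
    } {
        ch <- prometheus.MustNewConstMetric(
            descPersistentVolumeStatusPhase, prometheus.GaugeValue,
            boolFloat64(pv.Status.Phase == phase),
            pv.Name, string(phase),
        )
    }
}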
- } - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(pv.Labels) - addGauge(persistentVolumeLabelsDesc(labelKeys), 1, labelValues...) - - addGauge(descPersistentVolumeInfo, 1, pv.Spec.StorageClassName) - // Set current phase to 1, others to 0 if it is set. - if p := pv.Status.Phase; p != "" { - addGauge(descPersistentVolumeStatusPhase, boolFloat64(p == v1.VolumePending), string(v1.VolumePending)) - addGauge(descPersistentVolumeStatusPhase, boolFloat64(p == v1.VolumeAvailable), string(v1.VolumeAvailable)) - addGauge(descPersistentVolumeStatusPhase, boolFloat64(p == v1.VolumeBound), string(v1.VolumeBound)) - addGauge(descPersistentVolumeStatusPhase, boolFloat64(p == v1.VolumeReleased), string(v1.VolumeReleased)) - addGauge(descPersistentVolumeStatusPhase, boolFloat64(p == v1.VolumeFailed), string(v1.VolumeFailed)) - } -} diff --git a/collectors/persistentvolume_test.go b/collectors/persistentvolume_test.go deleted file mode 100644 index 7bbedf66fe..0000000000 --- a/collectors/persistentvolume_test.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockPersistentVolumeStore struct { - list func() (v1.PersistentVolumeList, error) -} - -func (ns mockPersistentVolumeStore) List() (v1.PersistentVolumeList, error) { - return ns.list() -} - -func TestPersistentVolumeCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_persistentvolume_status_phase The phase indicates if a volume is available, bound to a claim, or released by a claim. - # TYPE kube_persistentvolume_status_phase gauge - # HELP kube_persistentvolume_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_persistentvolume_labels gauge - # HELP kube_persistentvolume_info Information about persistentvolume. - # TYPE kube_persistentvolume_info gauge - ` - cases := []struct { - pvs []v1.PersistentVolume - metrics []string // which metrics should be checked - want string - }{ - // Verify phase enumerations. 
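// The cases below run through the harness sketched here; the wrapper name is
// hypothetical, while mockPersistentVolumeStore, persistentVolumeCollector,
// and gatherAndCompare come from this package. A stub store hands fixture
// objects to the collector, the collector's output is rendered in the
// Prometheus exposition format, and the result is compared against want,
// optionally restricted to the families listed in metrics.
func exampleGatherAndCompare(t *testing.T, pvs []v1.PersistentVolume, want string, metrics []string) {
    dc := &persistentVolumeCollector{
        store: &mockPersistentVolumeStore{
            list: func() (v1.PersistentVolumeList, error) {
                return v1.PersistentVolumeList{Items: pvs}, nil
            },
        },
    }
    if err := gatherAndCompare(dc, want, metrics); err != nil {
        t.Errorf("unexpected collecting result:\n%s", err)
    }
}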
- { - pvs: []v1.PersistentVolume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-pending", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumePending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-available", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeAvailable, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-bound", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeBound, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-released", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeReleased, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-failed", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeFailed, - }, - }, - }, - want: metadata + ` - kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Available"} 1 - kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Bound"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Failed"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Pending"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Released"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Available"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Bound"} 1 - kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Failed"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Pending"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Released"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Available"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Bound"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Failed"} 1 - kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Pending"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Released"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Available"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Bound"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Failed"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Pending"} 1 - kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Released"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Available"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Bound"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Failed"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Pending"} 0 - kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Released"} 1 - `, - metrics: []string{"kube_persistentvolume_status_phase"}, - }, - { - pvs: []v1.PersistentVolume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-pending", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumePending, - }, - Spec: v1.PersistentVolumeSpec{ - StorageClassName: "test", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv-available", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeAvailable, - }, - }, - }, - want: metadata + ` - 
kube_persistentvolume_info{persistentvolume="test-pv-available",storageclass=""} 1 - kube_persistentvolume_info{persistentvolume="test-pv-pending",storageclass="test"} 1 - `, - metrics: []string{"kube_persistentvolume_info"}, - }, - { - pvs: []v1.PersistentVolume{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-labeled-pv", - Labels: map[string]string{ - "app": "mysql-server", - }, - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumePending, - }, - Spec: v1.PersistentVolumeSpec{ - StorageClassName: "test", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-unlabeled-pv", - }, - Status: v1.PersistentVolumeStatus{ - Phase: v1.VolumeAvailable, - }, - }, - }, - want: metadata + ` - kube_persistentvolume_labels{persistentvolume="test-unlabeled-pv"} 1 - kube_persistentvolume_labels{label_app="mysql-server",persistentvolume="test-labeled-pv"} 1 - `, - metrics: []string{"kube_persistentvolume_labels"}, - }, - } - for _, c := range cases { - dc := &persistentVolumeCollector{ - store: &mockPersistentVolumeStore{ - list: func() (v1.PersistentVolumeList, error) { - return v1.PersistentVolumeList{Items: c.pvs}, nil - }, - }, - } - if err := gatherAndCompare(dc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/persistentvolumeclaim.go b/collectors/persistentvolumeclaim.go deleted file mode 100644 index d5ee3f1e98..0000000000 --- a/collectors/persistentvolumeclaim.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descPersistentVolumeClaimLabelsName = "kube_persistentvolumeclaim_labels" - descPersistentVolumeClaimLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descPersistentVolumeClaimLabelsDefaultLabels = []string{"namespace", "persistentvolumeclaim"} - - descPersistentVolumeClaimLabels = prometheus.NewDesc( - descPersistentVolumeClaimLabelsName, - descPersistentVolumeClaimLabelsHelp, - descPersistentVolumeClaimLabelsDefaultLabels, nil, - ) - - descPersistentVolumeClaimInfo = prometheus.NewDesc( - "kube_persistentvolumeclaim_info", - "Information about persistent volume claim.", - []string{ - "namespace", - "persistentvolumeclaim", - "storageclass", - "volumename", - }, nil, - ) - descPersistentVolumeClaimStatusPhase = prometheus.NewDesc( - "kube_persistentvolumeclaim_status_phase", - "The phase the persistent volume claim is currently in.", - []string{ - "namespace", - "persistentvolumeclaim", - "phase", - }, nil, - ) - descPersistentVolumeClaimResourceRequestsStorage = prometheus.NewDesc( - "kube_persistentvolumeclaim_resource_requests_storage_bytes", - "The capacity of storage requested by the persistent volume claim.", - []string{ - "namespace", - "persistentvolumeclaim", - }, nil, - ) -) - -type PersistentVolumeClaimLister func() (v1.PersistentVolumeClaimList, error) - -func (l PersistentVolumeClaimLister) List() (v1.PersistentVolumeClaimList, error) { - return l() -} - -func RegisterPersistentVolumeClaimCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect persistentvolumeclaim with %s", client.APIVersion()) - - pvcinfs := NewSharedInformerList(client, "persistentvolumeclaims", namespaces, &v1.PersistentVolumeClaim{}) - - persistentVolumeClaimLister := PersistentVolumeClaimLister(func() (pvcs v1.PersistentVolumeClaimList, err error) { - for _, pvcinf := range *pvcinfs { - for _, pvc := range pvcinf.GetStore().List() { - pvcs.Items = append(pvcs.Items, *(pvc.(*v1.PersistentVolumeClaim))) - } - } - return pvcs, nil - }) - - registry.MustRegister(&persistentVolumeClaimCollector{store: persistentVolumeClaimLister}) - pvcinfs.Run(context.Background().Done()) -} - -type persistentVolumeClaimStore interface { - List() (v1.PersistentVolumeClaimList, error) -} - -// persistentVolumeClaimCollector collects metrics about all persistentVolumeClaims in the cluster. -type persistentVolumeClaimCollector struct { - store persistentVolumeClaimStore -} - -// Describe implements the prometheus.Collector interface. -func (collector *persistentVolumeClaimCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descPersistentVolumeClaimLabels - ch <- descPersistentVolumeClaimInfo - ch <- descPersistentVolumeClaimStatusPhase - ch <- descPersistentVolumeClaimResourceRequestsStorage -} - -func persistentVolumeClaimLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descPersistentVolumeClaimLabelsName, - descPersistentVolumeClaimLabelsHelp, - append(descPersistentVolumeClaimLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -// Collect implements the prometheus.Collector interface. 
-func (collector *persistentVolumeClaimCollector) Collect(ch chan<- prometheus.Metric) { - persistentVolumeClaimCollector, err := collector.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "persistentvolumeclaim"}).Inc() - glog.Errorf("listing persistent volume claims failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "persistentvolumeclaim"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "persistentvolumeclaim"}).Observe(float64(len(persistentVolumeClaimCollector.Items))) - for _, pvc := range persistentVolumeClaimCollector.Items { - collector.collectPersistentVolumeClaim(ch, pvc) - } - - glog.V(4).Infof("collected %d persistentvolumeclaims", len(persistentVolumeClaimCollector.Items)) -} - -// getPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func getPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - // Return an empty string to indicate the absence of a storage class. - return "" -} - -func (collector *persistentVolumeClaimCollector) collectPersistentVolumeClaim(ch chan<- prometheus.Metric, pvc v1.PersistentVolumeClaim) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{pvc.Namespace, pvc.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(pvc.Labels) - addGauge(persistentVolumeClaimLabelsDesc(labelKeys), 1, labelValues...) - - storageClassName := getPersistentVolumeClaimClass(&pvc) - volumeName := pvc.Spec.VolumeName - addGauge(descPersistentVolumeClaimInfo, 1, storageClassName, volumeName) - - // Set current phase to 1, others to 0 if it is set. - if p := pvc.Status.Phase; p != "" { - addGauge(descPersistentVolumeClaimStatusPhase, boolFloat64(p == v1.ClaimLost), string(v1.ClaimLost)) - addGauge(descPersistentVolumeClaimStatusPhase, boolFloat64(p == v1.ClaimBound), string(v1.ClaimBound)) - addGauge(descPersistentVolumeClaimStatusPhase, boolFloat64(p == v1.ClaimPending), string(v1.ClaimPending)) - } - - if storage, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok { - addGauge(descPersistentVolumeClaimResourceRequestsStorage, float64(storage.Value())) - } -} diff --git a/collectors/persistentvolumeclaim_test.go b/collectors/persistentvolumeclaim_test.go deleted file mode 100644 index 33e92d578a..0000000000 --- a/collectors/persistentvolumeclaim_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
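As its doc comment states, `getPersistentVolumeClaimClass` above resolves a claim's class by checking the deprecated beta annotation first, then `spec.storageClassName`, and finally falling back to the empty string. A small usage sketch of that precedence (fixture values are illustrative):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// resolveClass follows the same precedence as the deleted
// getPersistentVolumeClaimClass: beta annotation, then spec, then "".
func resolveClass(claim *v1.PersistentVolumeClaim) string {
	if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
		return class
	}
	if claim.Spec.StorageClassName != nil {
		return *claim.Spec.StorageClassName
	}
	return ""
}

func main() {
	sc := "rbd"
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			// The legacy annotation wins even when spec.storageClassName is set.
			Annotations: map[string]string{v1.BetaStorageClassAnnotation: "legacy-rbd"},
		},
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &sc},
	}
	fmt.Println(resolveClass(&claim)) // legacy-rbd
}
```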
-*/ - -package collectors - -import ( - "testing" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockPersistentVolumeClaimStore struct { - list func() (v1.PersistentVolumeClaimList, error) -} - -func (ns mockPersistentVolumeClaimStore) List() (v1.PersistentVolumeClaimList, error) { - return ns.list() -} - -func TestPersistentVolumeClaimCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_persistentvolumeclaim_info Information about persistent volume claim. - # TYPE kube_persistentvolumeclaim_info gauge - # HELP kube_persistentvolumeclaim_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_persistentvolumeclaim_labels gauge - # HELP kube_persistentvolumeclaim_status_phase The phase the persistent volume claim is currently in. - # TYPE kube_persistentvolumeclaim_status_phase gauge - # HELP kube_persistentvolumeclaim_resource_requests_storage_bytes The capacity of storage requested by the persistent volume claim. - # TYPE kube_persistentvolumeclaim_resource_requests_storage_bytes gauge - ` - storageClassName := "rbd" - cases := []struct { - pvcs []v1.PersistentVolumeClaim - metrics []string // which metrics should be checked - want string - }{ - // Verify phase enumerations. - { - pvcs: []v1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "mysql-data", - Namespace: "default", - Labels: map[string]string{ - "app": "mysql-server", - }, - }, - Spec: v1.PersistentVolumeClaimSpec{ - StorageClassName: &storageClassName, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("1Gi"), - }, - }, - VolumeName: "pvc-mysql-data", - }, - Status: v1.PersistentVolumeClaimStatus{ - Phase: v1.ClaimBound, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "prometheus-data", - Namespace: "default", - }, - Spec: v1.PersistentVolumeClaimSpec{ - StorageClassName: &storageClassName, - VolumeName: "pvc-prometheus-data", - }, - Status: v1.PersistentVolumeClaimStatus{ - Phase: v1.ClaimPending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "mongo-data", - }, - Status: v1.PersistentVolumeClaimStatus{ - Phase: v1.ClaimLost, - }, - }, - }, - want: metadata + ` - kube_persistentvolumeclaim_info{namespace="",persistentvolumeclaim="mongo-data",storageclass="",volumename=""} 1 - kube_persistentvolumeclaim_info{namespace="default",persistentvolumeclaim="mysql-data",storageclass="rbd",volumename="pvc-mysql-data"} 1 - kube_persistentvolumeclaim_info{namespace="default",persistentvolumeclaim="prometheus-data",storageclass="rbd",volumename="pvc-prometheus-data"} 1 - kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Bound"} 0 - kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Lost"} 1 - kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Pending"} 0 - kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Bound"} 1 - kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Lost"} 0 - kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Pending"} 0 - 
kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Bound"} 0 - kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Lost"} 0 - kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Pending"} 1 - kube_persistentvolumeclaim_resource_requests_storage_bytes{namespace="default",persistentvolumeclaim="mysql-data"} 1.073741824e+09 - kube_persistentvolumeclaim_labels{namespace="",persistentvolumeclaim="mongo-data"} 1 - kube_persistentvolumeclaim_labels{namespace="default",persistentvolumeclaim="prometheus-data"} 1 - kube_persistentvolumeclaim_labels{label_app="mysql-server",namespace="default",persistentvolumeclaim="mysql-data"} 1 - `, - metrics: []string{"kube_persistentvolumeclaim_info", "kube_persistentvolumeclaim_status_phase", "kube_persistentvolumeclaim_resource_requests_storage_bytes", "kube_persistentvolumeclaim_labels"}, - }, - } - for _, c := range cases { - dc := &persistentVolumeClaimCollector{ - store: &mockPersistentVolumeClaimStore{ - list: func() (v1.PersistentVolumeClaimList, error) { - return v1.PersistentVolumeClaimList{Items: c.pvcs}, nil - }, - }, - } - if err := gatherAndCompare(dc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/pod.go b/collectors/pod.go deleted file mode 100644 index 4f45c67ca0..0000000000 --- a/collectors/pod.go +++ /dev/null @@ -1,464 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "regexp" - "strconv" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/util/node" -) - -var ( - invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - descPodLabelsName = "kube_pod_labels" - descPodLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descPodLabelsDefaultLabels = []string{"namespace", "pod"} - containerWaitingReasons = []string{"ContainerCreating", "CrashLoopBackOff", "ErrImagePull", "ImagePullBackOff"} - containerTerminatedReasons = []string{"OOMKilled", "Completed", "Error", "ContainerCannotRun"} - - descPodInfo = prometheus.NewDesc( - "kube_pod_info", - "Information about pod.", - []string{"namespace", "pod", "host_ip", "pod_ip", "node", "created_by_kind", "created_by_name"}, nil, - ) - - descPodStartTime = prometheus.NewDesc( - "kube_pod_start_time", - "Start time in unix timestamp for a pod.", - []string{"namespace", "pod"}, nil, - ) - - descPodCompletionTime = prometheus.NewDesc( - "kube_pod_completion_time", - "Completion time in unix timestamp for a pod.", - []string{"namespace", "pod"}, nil, - ) - - descPodOwner = prometheus.NewDesc( - "kube_pod_owner", - "Information about the Pod's owner.", - []string{"namespace", "pod", "owner_kind", "owner_name", "owner_is_controller"}, nil, - ) - - descPodLabels = prometheus.NewDesc( - descPodLabelsName, - descPodLabelsHelp, - descPodLabelsDefaultLabels, nil, - ) - - descPodCreated = prometheus.NewDesc( - "kube_pod_created", - "Unix creation timestamp", - []string{"namespace", "pod"}, nil, - ) - - descPodStatusPhase = prometheus.NewDesc( - "kube_pod_status_phase", - "The pods current phase.", - []string{"namespace", "pod", "phase"}, nil, - ) - - descPodStatusReady = prometheus.NewDesc( - "kube_pod_status_ready", - "Describes whether the pod is ready to serve requests.", - []string{"namespace", "pod", "condition"}, nil, - ) - - descPodStatusScheduled = prometheus.NewDesc( - "kube_pod_status_scheduled", - "Describes the status of the scheduling process for the pod.", - []string{"namespace", "pod", "condition"}, nil, - ) - - descPodContainerInfo = prometheus.NewDesc( - "kube_pod_container_info", - "Information about a container in a pod.", - []string{"namespace", "pod", "container", "image", "image_id", "container_id"}, nil, - ) - - descPodContainerStatusWaiting = prometheus.NewDesc( - "kube_pod_container_status_waiting", - "Describes whether the container is currently in waiting state.", - []string{"namespace", "pod", "container"}, nil, - ) - - descPodContainerStatusWaitingReason = prometheus.NewDesc( - "kube_pod_container_status_waiting_reason", - "Describes the reason the container is currently in waiting state.", - []string{"namespace", "pod", "container", "reason"}, nil, - ) - - descPodContainerStatusRunning = prometheus.NewDesc( - "kube_pod_container_status_running", - "Describes whether the container is currently in running state.", - []string{"namespace", "pod", "container"}, nil, - ) - - descPodContainerStatusTerminated = prometheus.NewDesc( - "kube_pod_container_status_terminated", - "Describes whether the container is currently in terminated state.", - []string{"namespace", "pod", "container"}, nil, - ) - - descPodContainerStatusTerminatedReason = prometheus.NewDesc( - "kube_pod_container_status_terminated_reason", - "Describes the reason the container is currently in terminated state.", - []string{"namespace", "pod", "container", "reason"}, nil, - ) - - descPodContainerStatusReady = prometheus.NewDesc( - "kube_pod_container_status_ready", - "Describes whether the containers readiness check succeeded.", - []string{"namespace", "pod", "container"}, nil, - ) - - descPodContainerStatusRestarts = prometheus.NewDesc( - "kube_pod_container_status_restarts_total", - "The number of container restarts per container.", - []string{"namespace", "pod", "container"}, nil, - ) 
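The fixed `containerWaitingReasons` and `containerTerminatedReasons` lists above are expanded one-hot: for every container, one series per reason, with the matching reason valued 1 and all others 0. A condensed, standalone sketch of that expansion (`boolFloat64` is the helper the deleted collectors use):

```go
package main

import "fmt"

func boolFloat64(b bool) float64 {
	if b {
		return 1
	}
	return 0
}

// oneHot expands the current reason over the fixed reason list, as the deleted
// pod collector does for kube_pod_container_status_waiting_reason.
func oneHot(current string, reasons []string) map[string]float64 {
	out := make(map[string]float64, len(reasons))
	for _, r := range reasons {
		out[r] = boolFloat64(r == current)
	}
	return out
}

func main() {
	waiting := []string{"ContainerCreating", "CrashLoopBackOff", "ErrImagePull", "ImagePullBackOff"}
	fmt.Println(oneHot("CrashLoopBackOff", waiting))
	// map[ContainerCreating:0 CrashLoopBackOff:1 ErrImagePull:0 ImagePullBackOff:0]
}
```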
- - descPodContainerResourceRequestsCpuCores = prometheus.NewDesc( - "kube_pod_container_resource_requests_cpu_cores", - "The number of requested cpu cores by a container.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodContainerResourceRequestsMemoryBytes = prometheus.NewDesc( - "kube_pod_container_resource_requests_memory_bytes", - "The number of requested memory bytes by a container.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodContainerResourceLimitsCpuCores = prometheus.NewDesc( - "kube_pod_container_resource_limits_cpu_cores", - "The limit on cpu cores to be used by a container.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodContainerResourceLimitsMemoryBytes = prometheus.NewDesc( - "kube_pod_container_resource_limits_memory_bytes", - "The limit on memory to be used by a container in bytes.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodContainerResourceRequestsNvidiaGPUDevices = prometheus.NewDesc( - "kube_pod_container_resource_requests_nvidia_gpu_devices", - "The number of requested gpu devices by a container.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodContainerResourceLimitsNvidiaGPUDevices = prometheus.NewDesc( - "kube_pod_container_resource_limits_nvidia_gpu_devices", - "The limit on gpu devices to be used by a container.", - []string{"namespace", "pod", "container", "node"}, nil, - ) - - descPodSpecVolumesPersistentVolumeClaimsInfo = prometheus.NewDesc( - "kube_pod_spec_volumes_persistentvolumeclaims_info", - "Information about persistentvolumeclaim volumes in a pod.", - []string{"namespace", "pod", "volume", "persistentvolumeclaim"}, nil, - ) - - descPodSpecVolumesPersistentVolumeClaimsReadOnly = prometheus.NewDesc( - "kube_pod_spec_volumes_persistentvolumeclaims_readonly", - "Describes whether a persistentvolumeclaim is mounted read only.", - []string{"namespace", "pod", "volume", "persistentvolumeclaim"}, nil, - ) -) - -type PodLister func() ([]v1.Pod, error) - -func (l PodLister) List() ([]v1.Pod, error) { - return l() -} - -func RegisterPodCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect pod with %s", client.APIVersion()) - - pinfs := NewSharedInformerList(client, "pods", namespaces, &v1.Pod{}) - - podLister := PodLister(func() (pods []v1.Pod, err error) { - for _, pinf := range *pinfs { - for _, m := range pinf.GetStore().List() { - pods = append(pods, *m.(*v1.Pod)) - } - } - return pods, nil - }) - - registry.MustRegister(&podCollector{store: podLister}) - pinfs.Run(context.Background().Done()) -} - -type podStore interface { - List() (pods []v1.Pod, err error) -} - -// podCollector collects metrics about all pods in the cluster. -type podCollector struct { - store podStore -} - -// Describe implements the prometheus.Collector interface. 
-func (pc *podCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descPodInfo - ch <- descPodStartTime - ch <- descPodCompletionTime - ch <- descPodOwner - ch <- descPodLabels - ch <- descPodCreated - ch <- descPodStatusPhase - ch <- descPodStatusReady - ch <- descPodStatusScheduled - ch <- descPodContainerInfo - ch <- descPodContainerStatusWaiting - ch <- descPodContainerStatusWaitingReason - ch <- descPodContainerStatusRunning - ch <- descPodContainerStatusTerminated - ch <- descPodContainerStatusTerminatedReason - ch <- descPodContainerStatusReady - ch <- descPodContainerStatusRestarts - ch <- descPodContainerResourceRequestsCpuCores - ch <- descPodContainerResourceRequestsMemoryBytes - ch <- descPodContainerResourceLimitsCpuCores - ch <- descPodContainerResourceLimitsMemoryBytes - ch <- descPodContainerResourceRequestsNvidiaGPUDevices - ch <- descPodContainerResourceLimitsNvidiaGPUDevices - ch <- descPodSpecVolumesPersistentVolumeClaimsInfo - ch <- descPodSpecVolumesPersistentVolumeClaimsReadOnly -} - -// Collect implements the prometheus.Collector interface. -func (pc *podCollector) Collect(ch chan<- prometheus.Metric) { - pods, err := pc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "pod"}).Inc() - glog.Errorf("listing pods failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "pod"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "pod"}).Observe(float64(len(pods))) - for _, p := range pods { - pc.collectPod(ch, p) - } - - glog.V(4).Infof("collected %d pods", len(pods)) -} - -func kubeLabelsToPrometheusLabels(labels map[string]string) ([]string, []string) { - labelKeys := make([]string, len(labels)) - labelValues := make([]string, len(labels)) - i := 0 - for k, v := range labels { - labelKeys[i] = "label_" + sanitizeLabelName(k) - labelValues[i] = v - i++ - } - return labelKeys, labelValues -} - -func kubeAnnotationsToPrometheusAnnotations(annotations map[string]string) ([]string, []string) { - annotationKeys := make([]string, len(annotations)) - annotationValues := make([]string, len(annotations)) - i := 0 - for k, v := range annotations { - annotationKeys[i] = "annotation_" + sanitizeLabelName(k) - annotationValues[i] = v - i++ - } - return annotationKeys, annotationValues -} - -func sanitizeLabelName(s string) string { - return invalidLabelCharRE.ReplaceAllString(s, "_") -} - -func podLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descPodLabelsName, - descPodLabelsHelp, - append(descPodLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (pc *podCollector) collectPod(ch chan<- prometheus.Metric, p v1.Pod) { - nodeName := p.Spec.NodeName - addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { - lv = append([]string{p.Namespace, p.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, t, v, lv...) - } - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - addCounter := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.CounterValue, v, lv...) 
- } - - createdBy := metav1.GetControllerOf(&p) - createdByKind := "" - createdByName := "" - if createdBy != nil { - if createdBy.Kind != "" { - createdByKind = createdBy.Kind - } - if createdBy.Name != "" { - createdByName = createdBy.Name - } - } - - if p.Status.StartTime != nil { - addGauge(descPodStartTime, float64((*(p.Status.StartTime)).Unix())) - } - - addGauge(descPodInfo, 1, p.Status.HostIP, p.Status.PodIP, nodeName, createdByKind, createdByName) - - owners := p.GetOwnerReferences() - if len(owners) == 0 { - addGauge(descPodOwner, 1, "", "", "") - } else { - for _, owner := range owners { - if owner.Controller != nil { - addGauge(descPodOwner, 1, owner.Kind, owner.Name, strconv.FormatBool(*owner.Controller)) - } else { - addGauge(descPodOwner, 1, owner.Kind, owner.Name, "false") - } - } - } - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(p.Labels) - addGauge(podLabelsDesc(labelKeys), 1, labelValues...) - - if phase := p.Status.Phase; phase != "" { - addGauge(descPodStatusPhase, boolFloat64(phase == v1.PodPending), string(v1.PodPending)) - addGauge(descPodStatusPhase, boolFloat64(phase == v1.PodSucceeded), string(v1.PodSucceeded)) - addGauge(descPodStatusPhase, boolFloat64(phase == v1.PodFailed), string(v1.PodFailed)) - // This logic is directly copied from: https://github.com/kubernetes/kubernetes/blob/d39bfa0d138368bbe72b0eaf434501dcb4ec9908/pkg/printers/internalversion/printers.go#L597-L601 - // For more info, please go to: https://github.com/kubernetes/kube-state-metrics/issues/410 - addGauge(descPodStatusPhase, boolFloat64(phase == v1.PodRunning && !(p.DeletionTimestamp != nil && p.Status.Reason == node.NodeUnreachablePodReason)), string(v1.PodRunning)) - addGauge(descPodStatusPhase, boolFloat64(phase == v1.PodUnknown || (p.DeletionTimestamp != nil && p.Status.Reason == node.NodeUnreachablePodReason)), string(v1.PodUnknown)) - } - - if !p.CreationTimestamp.IsZero() { - addGauge(descPodCreated, float64(p.CreationTimestamp.Unix())) - } - - for _, c := range p.Status.Conditions { - switch c.Type { - case v1.PodReady: - addConditionMetrics(ch, descPodStatusReady, c.Status, p.Namespace, p.Name) - case v1.PodScheduled: - addConditionMetrics(ch, descPodStatusScheduled, c.Status, p.Namespace, p.Name) - } - } - - waitingReason := func(cs v1.ContainerStatus, reason string) bool { - if cs.State.Waiting == nil { - return false - } - return cs.State.Waiting.Reason == reason - } - - terminationReason := func(cs v1.ContainerStatus, reason string) bool { - if cs.State.Terminated == nil { - return false - } - return cs.State.Terminated.Reason == reason - } - - var lastFinishTime float64 - - for _, cs := range p.Status.ContainerStatuses { - addGauge(descPodContainerInfo, 1, - cs.Name, cs.Image, cs.ImageID, cs.ContainerID, - ) - addGauge(descPodContainerStatusWaiting, boolFloat64(cs.State.Waiting != nil), cs.Name) - for _, reason := range containerWaitingReasons { - addGauge(descPodContainerStatusWaitingReason, boolFloat64(waitingReason(cs, reason)), cs.Name, reason) - } - addGauge(descPodContainerStatusRunning, boolFloat64(cs.State.Running != nil), cs.Name) - addGauge(descPodContainerStatusTerminated, boolFloat64(cs.State.Terminated != nil), cs.Name) - for _, reason := range containerTerminatedReasons { - addGauge(descPodContainerStatusTerminatedReason, boolFloat64(terminationReason(cs, reason)), cs.Name, reason) - } - addGauge(descPodContainerStatusReady, boolFloat64(cs.Ready), cs.Name) - addCounter(descPodContainerStatusRestarts, float64(cs.RestartCount), cs.Name) - - if 
cs.State.Terminated != nil { - if lastFinishTime == 0 || lastFinishTime < float64(cs.State.Terminated.FinishedAt.Unix()) { - lastFinishTime = float64(cs.State.Terminated.FinishedAt.Unix()) - } - } - } - - if lastFinishTime > 0 { - addGauge(descPodCompletionTime, lastFinishTime) - } - - for _, c := range p.Spec.Containers { - req := c.Resources.Requests - lim := c.Resources.Limits - - if cpu, ok := req[v1.ResourceCPU]; ok { - addGauge(descPodContainerResourceRequestsCpuCores, float64(cpu.MilliValue())/1000, - c.Name, nodeName) - } - if mem, ok := req[v1.ResourceMemory]; ok { - addGauge(descPodContainerResourceRequestsMemoryBytes, float64(mem.Value()), - c.Name, nodeName) - } - - if gpu, ok := req[v1.ResourceNvidiaGPU]; ok { - addGauge(descPodContainerResourceRequestsNvidiaGPUDevices, float64(gpu.Value()), c.Name, nodeName) - } - - if cpu, ok := lim[v1.ResourceCPU]; ok { - addGauge(descPodContainerResourceLimitsCpuCores, float64(cpu.MilliValue())/1000, - c.Name, nodeName) - } - - if mem, ok := lim[v1.ResourceMemory]; ok { - addGauge(descPodContainerResourceLimitsMemoryBytes, float64(mem.Value()), - c.Name, nodeName) - } - - if gpu, ok := lim[v1.ResourceNvidiaGPU]; ok { - addGauge(descPodContainerResourceLimitsNvidiaGPUDevices, float64(gpu.Value()), c.Name, nodeName) - } - } - - for _, v := range p.Spec.Volumes { - if v.PersistentVolumeClaim != nil { - addGauge(descPodSpecVolumesPersistentVolumeClaimsInfo, 1, v.Name, v.PersistentVolumeClaim.ClaimName) - readOnly := 0.0 - if v.PersistentVolumeClaim.ReadOnly { - readOnly = 1.0 - } - addGauge(descPodSpecVolumesPersistentVolumeClaimsReadOnly, readOnly, v.Name, v.PersistentVolumeClaim.ClaimName) - } - } -} diff --git a/collectors/pod_test.go b/collectors/pod_test.go deleted file mode 100644 index 1a58b6f951..0000000000 --- a/collectors/pod_test.go +++ /dev/null @@ -1,823 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/util/node" -) - -type mockPodStore struct { - f func() ([]v1.Pod, error) -} - -func (ds mockPodStore) List() (pods []v1.Pod, err error) { - return ds.f() -} - -func TestPodCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - var test = true - - startTime := 1501569018 - metav1StartTime := metav1.Unix(int64(startTime), 0) - - const metadata = ` - # HELP kube_pod_created Unix creation timestamp - # TYPE kube_pod_created gauge - # HELP kube_pod_container_info Information about a container in a pod. - # TYPE kube_pod_container_info gauge - # HELP kube_pod_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_pod_labels gauge - # HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded. 
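The resource loop deleted above converts `resource.Quantity` values with `MilliValue()/1000` for CPU cores and `Value()` for bytes, which is what yields samples such as `0.2` and `1e+08` in the resource test cases further down. A quick worked sketch of those conversions:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("200m")
	mem := resource.MustParse("100M")

	// MilliValue returns 200 for "200m"; dividing by 1000 yields 0.2 cores.
	fmt.Println(float64(cpu.MilliValue()) / 1000) // 0.2

	// Value returns the integer number of bytes: "100M" is 100 * 10^6.
	fmt.Println(float64(mem.Value())) // 1e+08
}
```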
- # TYPE kube_pod_container_status_ready gauge - # HELP kube_pod_container_status_restarts_total The number of container restarts per container. - # TYPE kube_pod_container_status_restarts_total counter - # HELP kube_pod_container_status_running Describes whether the container is currently in running state. - # TYPE kube_pod_container_status_running gauge - # HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state. - # TYPE kube_pod_container_status_terminated gauge - # HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state. - # TYPE kube_pod_container_status_terminated_reason gauge - # HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state. - # TYPE kube_pod_container_status_waiting gauge - # HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state. - # TYPE kube_pod_container_status_waiting_reason gauge - # HELP kube_pod_info Information about pod. - # TYPE kube_pod_info gauge - # HELP kube_pod_start_time Start time in unix timestamp for a pod. - # TYPE kube_pod_start_time gauge - # HELP kube_pod_completion_time Completion time in unix timestamp for a pod. - # TYPE kube_pod_completion_time gauge - # HELP kube_pod_owner Information about the Pod's owner. - # TYPE kube_pod_owner gauge - # HELP kube_pod_status_phase The pods current phase. - # TYPE kube_pod_status_phase gauge - # HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. - # TYPE kube_pod_status_ready gauge - # HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. - # TYPE kube_pod_status_scheduled gauge - # HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container. - # TYPE kube_pod_container_resource_requests_cpu_cores gauge - # HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container. - # TYPE kube_pod_container_resource_requests_memory_bytes gauge - # HELP kube_pod_container_resource_limits_cpu_cores The limit on cpu cores to be used by a container. - # TYPE kube_pod_container_resource_limits_cpu_cores gauge - # HELP kube_pod_container_resource_limits_memory_bytes The limit on memory to be used by a container in bytes. - # TYPE kube_pod_container_resource_limits_memory_bytes gauge - # HELP kube_pod_container_resource_requests_nvidia_gpu_devices The number of requested gpu devices by a container. - # TYPE kube_pod_container_resource_requests_nvidia_gpu_devices gauge - # HELP kube_pod_container_resource_limits_nvidia_gpu_devices The limit on gpu devices to be used by a container. - # TYPE kube_pod_container_resource_limits_nvidia_gpu_devices gauge - # HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod. - # TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge - # HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only. 
- # TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge - ` - cases := []struct { - pods []v1.Pod - metrics []string - want string - }{ - { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container1", - Image: "k8s.gcr.io/hyperkube1", - ImageID: "docker://sha256:aaa", - ContainerID: "docker://ab123", - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container2", - Image: "k8s.gcr.io/hyperkube2", - ImageID: "docker://sha256:bbb", - ContainerID: "docker://cd456", - }, - v1.ContainerStatus{ - Name: "container3", - Image: "k8s.gcr.io/hyperkube3", - ImageID: "docker://sha256:ccc", - ContainerID: "docker://ef789", - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_container_info{container="container1",container_id="docker://ab123",image="k8s.gcr.io/hyperkube1",image_id="docker://sha256:aaa",namespace="ns1",pod="pod1"} 1 - kube_pod_container_info{container="container2",container_id="docker://cd456",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",namespace="ns2",pod="pod2"} 1 - kube_pod_container_info{container="container3",container_id="docker://ef789",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",namespace="ns2",pod="pod2"} 1 - `, - metrics: []string{"kube_pod_container_info"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container1", - Ready: true, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container2", - Ready: true, - }, - v1.ContainerStatus{ - Name: "container3", - Ready: false, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_container_status_ready{container="container1",namespace="ns1",pod="pod1"} 1 - kube_pod_container_status_ready{container="container2",namespace="ns2",pod="pod2"} 1 - kube_pod_container_status_ready{container="container3",namespace="ns2",pod="pod2"} 0 - `, - metrics: []string{"kube_pod_container_status_ready"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container1", - RestartCount: 0, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container2", - RestartCount: 0, - }, - v1.ContainerStatus{ - Name: "container3", - RestartCount: 1, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_container_status_restarts_total{container="container1",namespace="ns1",pod="pod1"} 0 - kube_pod_container_status_restarts_total{container="container2",namespace="ns2",pod="pod2"} 0 - kube_pod_container_status_restarts_total{container="container3",namespace="ns2",pod="pod2"} 1 - `, - metrics: []string{"kube_pod_container_status_restarts_total"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: 
"container1", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, - }, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container2", - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - Reason: "OOMKilled", - }, - }, - }, - v1.ContainerStatus{ - Name: "container3", - State: v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: "ContainerCreating", - }, - }, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod3", - Namespace: "ns3", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container4", - State: v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: "CrashLoopBackOff", - }, - }, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod4", - Namespace: "ns4", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container5", - State: v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: "ImagePullBackOff", - }, - }, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod5", - Namespace: "ns5", - }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - Name: "container6", - State: v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: "ErrImagePull", - }, - }, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_container_status_running{container="container1",namespace="ns1",pod="pod1"} 1 - kube_pod_container_status_running{container="container4",namespace="ns3",pod="pod3"} 0 - kube_pod_container_status_running{container="container5",namespace="ns4",pod="pod4"} 0 - kube_pod_container_status_running{container="container6",namespace="ns5",pod="pod5"} 0 - kube_pod_container_status_running{container="container2",namespace="ns2",pod="pod2"} 0 - kube_pod_container_status_running{container="container3",namespace="ns2",pod="pod2"} 0 - kube_pod_container_status_terminated{container="container1",namespace="ns1",pod="pod1"} 0 - kube_pod_container_status_terminated{container="container4",namespace="ns3",pod="pod3"} 0 - kube_pod_container_status_terminated{container="container5",namespace="ns4",pod="pod4"} 0 - kube_pod_container_status_terminated{container="container6",namespace="ns5",pod="pod5"} 0 - kube_pod_container_status_terminated{container="container2",namespace="ns2",pod="pod2"} 1 - kube_pod_container_status_terminated{container="container3",namespace="ns2",pod="pod2"} 0 - kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="Error"} 0 - kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="OOMKilled"} 0 - kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Error"} 0 - 
kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="OOMKilled"} 0 - kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="Error"} 0 - kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="OOMKilled"} 0 - kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="Error"} 0 - kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="OOMKilled"} 0 - kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="Error"} 0 - kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="OOMKilled"} 1 - kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="Completed"} 0 - kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="ContainerCannotRun"} 0 - kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="Error"} 0 - kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="OOMKilled"} 0 - kube_pod_container_status_waiting{container="container1",namespace="ns1",pod="pod1"} 0 - kube_pod_container_status_waiting{container="container2",namespace="ns2",pod="pod2"} 0 - kube_pod_container_status_waiting{container="container3",namespace="ns2",pod="pod2"} 1 - kube_pod_container_status_waiting{container="container4",namespace="ns3",pod="pod3"} 1 - kube_pod_container_status_waiting{container="container5",namespace="ns4",pod="pod4"} 1 - kube_pod_container_status_waiting{container="container6",namespace="ns5",pod="pod5"} 1 - kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ContainerCreating"} 0 - kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ImagePullBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="CrashLoopBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ErrImagePull"} 0 - kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ContainerCreating"} 0 - kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ImagePullBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="CrashLoopBackOff"} 1 - kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ErrImagePull"} 0 - 
kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ContainerCreating"} 0 - kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ImagePullBackOff"} 1 - kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="CrashLoopBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ErrImagePull"} 0 - kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ContainerCreating"} 0 - kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ImagePullBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="CrashLoopBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ErrImagePull"} 1 - kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ContainerCreating"} 0 - kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ImagePullBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="CrashLoopBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ErrImagePull"} 0 - kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ContainerCreating"} 1 - kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ImagePullBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="CrashLoopBackOff"} 0 - kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ErrImagePull"} 0 - `, - metrics: []string{ - "kube_pod_container_status_running", - "kube_pod_container_status_waiting", - "kube_pod_container_status_waiting_reason", - "kube_pod_container_status_terminated", - "kube_pod_container_status_terminated_reason", - }, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - Status: v1.PodStatus{ - HostIP: "1.1.1.1", - PodIP: "1.2.3.4", - StartTime: &metav1StartTime, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - OwnerReferences: []metav1.OwnerReference{ - { - Kind: "ReplicaSet", - Name: "rs-name", - Controller: &test, - }, - }, - }, - Spec: v1.PodSpec{ - NodeName: "node2", - }, - Status: v1.PodStatus{ - HostIP: "1.1.1.1", - PodIP: "2.3.4.5", - ContainerStatuses: []v1.ContainerStatus{ - v1.ContainerStatus{ - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - FinishedAt: metav1.Time{ - Time: time.Unix(1501777018, 0), - }, - }, - }, - }, - v1.ContainerStatus{ - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - FinishedAt: metav1.Time{ - Time: time.Unix(1501888018, 0), - }, - }, - }, - }, - v1.ContainerStatus{ - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - FinishedAt: metav1.Time{ - Time: time.Unix(1501666018, 0), - }, - }, - }, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_created{namespace="ns1",pod="pod1"} 1.5e+09 - 
kube_pod_info{created_by_kind="",created_by_name="",host_ip="1.1.1.1",namespace="ns1",pod="pod1",node="node1",pod_ip="1.2.3.4"} 1 - kube_pod_info{created_by_kind="ReplicaSet",created_by_name="rs-name",host_ip="1.1.1.1",namespace="ns2",pod="pod2",node="node2",pod_ip="2.3.4.5"} 1 - kube_pod_start_time{namespace="ns1",pod="pod1"} 1501569018 - kube_pod_completion_time{namespace="ns2",pod="pod2"} 1501888018 - kube_pod_owner{namespace="ns1",pod="pod1",owner_kind="",owner_name="",owner_is_controller=""} 1 - kube_pod_owner{namespace="ns2",pod="pod2",owner_kind="ReplicaSet",owner_name="rs-name",owner_is_controller="true"} 1 - `, - metrics: []string{"kube_pod_created", "kube_pod_info", "kube_pod_start_time", "kube_pod_completion_time", "kube_pod_owner"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - Phase: v1.PodRunning, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - Phase: v1.PodPending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod3", - Namespace: "ns3", - }, - Status: v1.PodStatus{ - Phase: v1.PodUnknown, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod4", - Namespace: "ns4", - DeletionTimestamp: &metav1.Time{}, - }, - Status: v1.PodStatus{ - Phase: v1.PodRunning, - Reason: node.NodeUnreachablePodReason, - }, - }, - }, - want: metadata + ` - kube_pod_status_phase{namespace="ns1",phase="Failed",pod="pod1"} 0 - kube_pod_status_phase{namespace="ns1",phase="Pending",pod="pod1"} 0 - kube_pod_status_phase{namespace="ns1",phase="Running",pod="pod1"} 1 - kube_pod_status_phase{namespace="ns1",phase="Succeeded",pod="pod1"} 0 - kube_pod_status_phase{namespace="ns1",phase="Unknown",pod="pod1"} 0 - kube_pod_status_phase{namespace="ns2",phase="Failed",pod="pod2"} 0 - kube_pod_status_phase{namespace="ns2",phase="Pending",pod="pod2"} 1 - kube_pod_status_phase{namespace="ns2",phase="Running",pod="pod2"} 0 - kube_pod_status_phase{namespace="ns2",phase="Succeeded",pod="pod2"} 0 - kube_pod_status_phase{namespace="ns2",phase="Unknown",pod="pod2"} 0 - kube_pod_status_phase{namespace="ns3",phase="Failed",pod="pod3"} 0 - kube_pod_status_phase{namespace="ns3",phase="Pending",pod="pod3"} 0 - kube_pod_status_phase{namespace="ns3",phase="Running",pod="pod3"} 0 - kube_pod_status_phase{namespace="ns3",phase="Succeeded",pod="pod3"} 0 - kube_pod_status_phase{namespace="ns3",phase="Unknown",pod="pod3"} 1 - kube_pod_status_phase{namespace="ns4",phase="Failed",pod="pod4"} 0 - kube_pod_status_phase{namespace="ns4",phase="Pending",pod="pod4"} 0 - kube_pod_status_phase{namespace="ns4",phase="Running",pod="pod4"} 0 - kube_pod_status_phase{namespace="ns4",phase="Succeeded",pod="pod4"} 0 - kube_pod_status_phase{namespace="ns4",phase="Unknown",pod="pod4"} 1 - `, - metrics: []string{"kube_pod_status_phase"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ - v1.PodCondition{ - Type: v1.PodReady, - Status: v1.ConditionTrue, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ - v1.PodCondition{ - Type: v1.PodReady, - Status: v1.ConditionFalse, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_status_ready{condition="false",namespace="ns1",pod="pod1"} 0 - kube_pod_status_ready{condition="false",namespace="ns2",pod="pod2"} 1 - 
kube_pod_status_ready{condition="true",namespace="ns1",pod="pod1"} 1 - kube_pod_status_ready{condition="true",namespace="ns2",pod="pod2"} 0 - kube_pod_status_ready{condition="unknown",namespace="ns1",pod="pod1"} 0 - kube_pod_status_ready{condition="unknown",namespace="ns2",pod="pod2"} 0 - `, - metrics: []string{"kube_pod_status_ready"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ - v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionTrue, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ - v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_status_scheduled{condition="false",namespace="ns1",pod="pod1"} 0 - kube_pod_status_scheduled{condition="false",namespace="ns2",pod="pod2"} 1 - kube_pod_status_scheduled{condition="true",namespace="ns1",pod="pod1"} 1 - kube_pod_status_scheduled{condition="true",namespace="ns2",pod="pod2"} 0 - kube_pod_status_scheduled{condition="unknown",namespace="ns1",pod="pod1"} 0 - kube_pod_status_scheduled{condition="unknown",namespace="ns2",pod="pod2"} 0 - `, - metrics: []string{"kube_pod_status_scheduled"}, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - }, - Spec: v1.PodSpec{ - NodeName: "node1", - Containers: []v1.Container{ - v1.Container{ - Name: "pod1_con1", - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("200m"), - v1.ResourceMemory: resource.MustParse("100M"), - v1.ResourceNvidiaGPU: resource.MustParse("3"), - }, - Limits: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("200m"), - v1.ResourceMemory: resource.MustParse("100M"), - v1.ResourceNvidiaGPU: resource.MustParse("3"), - }, - }, - }, - v1.Container{ - Name: "pod1_con2", - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("300m"), - v1.ResourceMemory: resource.MustParse("200M"), - v1.ResourceNvidiaGPU: resource.MustParse("2"), - }, - Limits: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("300m"), - v1.ResourceMemory: resource.MustParse("200M"), - v1.ResourceNvidiaGPU: resource.MustParse("2"), - }, - }, - }, - }, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod2", - Namespace: "ns2", - }, - Spec: v1.PodSpec{ - NodeName: "node2", - Containers: []v1.Container{ - v1.Container{ - Name: "pod2_con1", - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("400m"), - v1.ResourceMemory: resource.MustParse("300M"), - v1.ResourceNvidiaGPU: resource.MustParse("3"), - }, - Limits: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("400m"), - v1.ResourceMemory: resource.MustParse("300M"), - v1.ResourceNvidiaGPU: resource.MustParse("3"), - }, - }, - }, - v1.Container{ - Name: "pod2_con2", - Resources: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("500m"), - v1.ResourceMemory: resource.MustParse("400M"), - v1.ResourceNvidiaGPU: resource.MustParse("5"), - }, - Limits: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("500m"), - v1.ResourceMemory: 
resource.MustParse("400M"), - v1.ResourceNvidiaGPU: resource.MustParse("5"), - }, - }, - }, - // A container without a resource specicication. No metrics will be emitted for that. - v1.Container{ - Name: "pod2_con3", - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_container_resource_requests_cpu_cores{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 0.2 - kube_pod_container_resource_requests_cpu_cores{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 0.3 - kube_pod_container_resource_requests_cpu_cores{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 0.4 - kube_pod_container_resource_requests_cpu_cores{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 0.5 - kube_pod_container_resource_requests_memory_bytes{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 1e+08 - kube_pod_container_resource_requests_memory_bytes{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2e+08 - kube_pod_container_resource_requests_memory_bytes{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3e+08 - kube_pod_container_resource_requests_memory_bytes{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 4e+08 - kube_pod_container_resource_requests_nvidia_gpu_devices{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 3 - kube_pod_container_resource_requests_nvidia_gpu_devices{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2 - kube_pod_container_resource_requests_nvidia_gpu_devices{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3 - kube_pod_container_resource_requests_nvidia_gpu_devices{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 5 - kube_pod_container_resource_limits_cpu_cores{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 0.2 - kube_pod_container_resource_limits_cpu_cores{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 0.3 - kube_pod_container_resource_limits_cpu_cores{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 0.4 - kube_pod_container_resource_limits_cpu_cores{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 0.5 - kube_pod_container_resource_limits_memory_bytes{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 1e+08 - kube_pod_container_resource_limits_memory_bytes{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2e+08 - kube_pod_container_resource_limits_memory_bytes{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3e+08 - kube_pod_container_resource_limits_memory_bytes{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 4e+08 - kube_pod_container_resource_limits_nvidia_gpu_devices{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 3 - kube_pod_container_resource_limits_nvidia_gpu_devices{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2 - kube_pod_container_resource_limits_nvidia_gpu_devices{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3 - kube_pod_container_resource_limits_nvidia_gpu_devices{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 5 - `, - metrics: []string{ - "kube_pod_container_resource_requests_cpu_cores", - "kube_pod_container_resource_requests_memory_bytes", - "kube_pod_container_resource_requests_nvidia_gpu_devices", - "kube_pod_container_resource_limits_cpu_cores", - "kube_pod_container_resource_limits_memory_bytes", - "kube_pod_container_resource_limits_nvidia_gpu_devices", - }, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: 
"pod1", - Namespace: "ns1", - Labels: map[string]string{ - "app": "example", - }, - }, - Spec: v1.PodSpec{}, - }, - }, - want: metadata + ` - kube_pod_labels{label_app="example",namespace="ns1",pod="pod1"} 1 - `, - metrics: []string{ - "kube_pod_labels", - }, - }, { - pods: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: "ns1", - Labels: map[string]string{ - "app": "example", - }, - }, - Spec: v1.PodSpec{ - Volumes: []v1.Volume{ - v1.Volume{ - Name: "myvol", - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "claim1", - ReadOnly: false, - }, - }, - }, - v1.Volume{ - Name: "my-readonly-vol", - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "claim2", - ReadOnly: true, - }, - }, - }, - v1.Volume{ - Name: "not-pvc-vol", - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{ - Medium: "memory", - }, - }, - }, - }, - }, - }, - }, - want: metadata + ` - kube_pod_spec_volumes_persistentvolumeclaims_info{namespace="ns1",persistentvolumeclaim="claim1",pod="pod1",volume="myvol"} 1 - kube_pod_spec_volumes_persistentvolumeclaims_info{namespace="ns1",persistentvolumeclaim="claim2",pod="pod1",volume="my-readonly-vol"} 1 - kube_pod_spec_volumes_persistentvolumeclaims_readonly{namespace="ns1",persistentvolumeclaim="claim1",pod="pod1",volume="myvol"} 0 - kube_pod_spec_volumes_persistentvolumeclaims_readonly{namespace="ns1",persistentvolumeclaim="claim2",pod="pod1",volume="my-readonly-vol"} 1 - - `, - metrics: []string{ - "kube_pod_spec_volumes_persistentvolumeclaims_info", - "kube_pod_spec_volumes_persistentvolumeclaims_readonly", - }, - }} - for _, c := range cases { - pc := &podCollector{ - store: mockPodStore{ - f: func() ([]v1.Pod, error) { return c.pods, nil }, - }, - } - if err := gatherAndCompare(pc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/replicaset.go b/collectors/replicaset.go deleted file mode 100644 index 40291bed6e..0000000000 --- a/collectors/replicaset.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/extensions/v1beta1" - "k8s.io/client-go/kubernetes" -) - -var ( - descReplicaSetCreated = prometheus.NewDesc( - "kube_replicaset_created", - "Unix creation timestamp", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetStatusReplicas = prometheus.NewDesc( - "kube_replicaset_status_replicas", - "The number of replicas per ReplicaSet.", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetStatusFullyLabeledReplicas = prometheus.NewDesc( - "kube_replicaset_status_fully_labeled_replicas", - "The number of fully labeled replicas per ReplicaSet.", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetStatusReadyReplicas = prometheus.NewDesc( - "kube_replicaset_status_ready_replicas", - "The number of ready replicas per ReplicaSet.", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetStatusObservedGeneration = prometheus.NewDesc( - "kube_replicaset_status_observed_generation", - "The generation observed by the ReplicaSet controller.", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetSpecReplicas = prometheus.NewDesc( - "kube_replicaset_spec_replicas", - "Number of desired pods for a ReplicaSet.", - []string{"namespace", "replicaset"}, nil, - ) - descReplicaSetMetadataGeneration = prometheus.NewDesc( - "kube_replicaset_metadata_generation", - "Sequence number representing a specific generation of the desired state.", - []string{"namespace", "replicaset"}, nil, - ) -) - -type ReplicaSetLister func() ([]v1beta1.ReplicaSet, error) - -func (l ReplicaSetLister) List() ([]v1beta1.ReplicaSet, error) { - return l() -} - -func RegisterReplicaSetCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.ExtensionsV1beta1().RESTClient() - glog.Infof("collect replicaset with %s", client.APIVersion()) - - rsinfs := NewSharedInformerList(client, "replicasets", namespaces, &v1beta1.ReplicaSet{}) - - replicaSetLister := ReplicaSetLister(func() (replicasets []v1beta1.ReplicaSet, err error) { - for _, rsinf := range *rsinfs { - for _, c := range rsinf.GetStore().List() { - replicasets = append(replicasets, *(c.(*v1beta1.ReplicaSet))) - } - } - return replicasets, nil - }) - - registry.MustRegister(&replicasetCollector{store: replicaSetLister}) - rsinfs.Run(context.Background().Done()) -} - -type replicasetStore interface { - List() (replicasets []v1beta1.ReplicaSet, err error) -} - -// replicasetCollector collects metrics about all replicasets in the cluster. -type replicasetCollector struct { - store replicasetStore -} - -// Describe implements the prometheus.Collector interface. -func (dc *replicasetCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descReplicaSetCreated - ch <- descReplicaSetStatusReplicas - ch <- descReplicaSetStatusFullyLabeledReplicas - ch <- descReplicaSetStatusReadyReplicas - ch <- descReplicaSetStatusObservedGeneration - ch <- descReplicaSetSpecReplicas - ch <- descReplicaSetMetadataGeneration -} - -// Collect implements the prometheus.Collector interface. 
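// Every collector deleted in this commit follows the same const-metric
// shape: package-level descriptors built with prometheus.NewDesc, a
// Describe that announces them, and a Collect that materializes throwaway
// metrics on each scrape via prometheus.MustNewConstMetric rather than
// mutating registered gauges. A minimal sketch of that pattern, assuming
// the github.com/prometheus/client_golang/prometheus import and a
// hypothetical descWidgets descriptor with a single "namespace" label:
//
//    type widgetCollector struct {
//        widgets func() ([]string, error) // stand-in for a lister-backed store
//    }
//
//    func (c *widgetCollector) Describe(ch chan<- *prometheus.Desc) {
//        ch <- descWidgets
//    }
//
//    func (c *widgetCollector) Collect(ch chan<- prometheus.Metric) {
//        ws, err := c.widgets()
//        if err != nil {
//            return // the real collectors also count the error, as below
//        }
//        ch <- prometheus.MustNewConstMetric(
//            descWidgets, prometheus.GaugeValue, float64(len(ws)), "ns1")
//    }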
-func (dc *replicasetCollector) Collect(ch chan<- prometheus.Metric) { - rss, err := dc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "replicaset"}).Inc() - glog.Errorf("listing replicasets failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "replicaset"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "replicaset"}).Observe(float64(len(rss))) - for _, d := range rss { - dc.collectReplicaSet(ch, d) - } - - glog.V(4).Infof("collected %d replicasets", len(rss)) -} - -func (dc *replicasetCollector) collectReplicaSet(ch chan<- prometheus.Metric, d v1beta1.ReplicaSet) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{d.Namespace, d.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - if !d.CreationTimestamp.IsZero() { - addGauge(descReplicaSetCreated, float64(d.CreationTimestamp.Unix())) - } - addGauge(descReplicaSetStatusReplicas, float64(d.Status.Replicas)) - addGauge(descReplicaSetStatusFullyLabeledReplicas, float64(d.Status.FullyLabeledReplicas)) - addGauge(descReplicaSetStatusReadyReplicas, float64(d.Status.ReadyReplicas)) - addGauge(descReplicaSetStatusObservedGeneration, float64(d.Status.ObservedGeneration)) - if d.Spec.Replicas != nil { - addGauge(descReplicaSetSpecReplicas, float64(*d.Spec.Replicas)) - } - addGauge(descReplicaSetMetadataGeneration, float64(d.ObjectMeta.Generation)) -} diff --git a/collectors/replicaset_test.go b/collectors/replicaset_test.go deleted file mode 100644 index 15c898cec0..0000000000 --- a/collectors/replicaset_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - rs1Replicas int32 = 5 - rs2Replicas int32 = 0 -) - -type mockReplicaSetStore struct { - f func() ([]v1beta1.ReplicaSet, error) -} - -func (rs mockReplicaSetStore) List() (replicasets []v1beta1.ReplicaSet, err error) { - return rs.f() -} - -func TestReplicaSetCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_replicaset_created Unix creation timestamp - # TYPE kube_replicaset_created gauge - # HELP kube_replicaset_metadata_generation Sequence number representing a specific generation of the desired state. - # TYPE kube_replicaset_metadata_generation gauge - # HELP kube_replicaset_status_replicas The number of replicas per ReplicaSet. - # TYPE kube_replicaset_status_replicas gauge - # HELP kube_replicaset_status_fully_labeled_replicas The number of fully labeled replicas per ReplicaSet. 
- # TYPE kube_replicaset_status_fully_labeled_replicas gauge - # HELP kube_replicaset_status_ready_replicas The number of ready replicas per ReplicaSet. - # TYPE kube_replicaset_status_ready_replicas gauge - # HELP kube_replicaset_status_observed_generation The generation observed by the ReplicaSet controller. - # TYPE kube_replicaset_status_observed_generation gauge - # HELP kube_replicaset_spec_replicas Number of desired pods for a ReplicaSet. - # TYPE kube_replicaset_spec_replicas gauge - ` - cases := []struct { - rss []v1beta1.ReplicaSet - want string - }{ - { - rss: []v1beta1.ReplicaSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "rs1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - Generation: 21, - }, - Status: v1beta1.ReplicaSetStatus{ - Replicas: 5, - FullyLabeledReplicas: 10, - ReadyReplicas: 5, - ObservedGeneration: 1, - }, - Spec: v1beta1.ReplicaSetSpec{ - Replicas: &rs1Replicas, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "rs2", - Namespace: "ns2", - Generation: 14, - }, - Status: v1beta1.ReplicaSetStatus{ - Replicas: 0, - FullyLabeledReplicas: 5, - ReadyReplicas: 0, - ObservedGeneration: 5, - }, - Spec: v1beta1.ReplicaSetSpec{ - Replicas: &rs2Replicas, - }, - }, - }, - want: metadata + ` - kube_replicaset_created{namespace="ns1",replicaset="rs1"} 1.5e+09 - kube_replicaset_metadata_generation{namespace="ns1",replicaset="rs1"} 21 - kube_replicaset_metadata_generation{namespace="ns2",replicaset="rs2"} 14 - kube_replicaset_status_replicas{namespace="ns1",replicaset="rs1"} 5 - kube_replicaset_status_replicas{namespace="ns2",replicaset="rs2"} 0 - kube_replicaset_status_observed_generation{namespace="ns1",replicaset="rs1"} 1 - kube_replicaset_status_observed_generation{namespace="ns2",replicaset="rs2"} 5 - kube_replicaset_status_fully_labeled_replicas{namespace="ns1",replicaset="rs1"} 10 - kube_replicaset_status_fully_labeled_replicas{namespace="ns2",replicaset="rs2"} 5 - kube_replicaset_status_ready_replicas{namespace="ns1",replicaset="rs1"} 5 - kube_replicaset_status_ready_replicas{namespace="ns2",replicaset="rs2"} 0 - kube_replicaset_spec_replicas{namespace="ns1",replicaset="rs1"} 5 - kube_replicaset_spec_replicas{namespace="ns2",replicaset="rs2"} 0 - `, - }, - } - for _, c := range cases { - dc := &replicasetCollector{ - store: mockReplicaSetStore{ - f: func() ([]v1beta1.ReplicaSet, error) { return c.rss, nil }, - }, - } - if err := gatherAndCompare(dc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/replicationcontroller.go b/collectors/replicationcontroller.go deleted file mode 100644 index 491ee2c536..0000000000 --- a/collectors/replicationcontroller.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "context" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descReplicationControllerCreated = prometheus.NewDesc( - "kube_replicationcontroller_created", - "Unix creation timestamp", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerStatusReplicas = prometheus.NewDesc( - "kube_replicationcontroller_status_replicas", - "The number of replicas per ReplicationController.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerStatusFullyLabeledReplicas = prometheus.NewDesc( - "kube_replicationcontroller_status_fully_labeled_replicas", - "The number of fully labeled replicas per ReplicationController.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerStatusReadyReplicas = prometheus.NewDesc( - "kube_replicationcontroller_status_ready_replicas", - "The number of ready replicas per ReplicationController.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerStatusAvailableReplicas = prometheus.NewDesc( - "kube_replicationcontroller_status_available_replicas", - "The number of available replicas per ReplicationController.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerStatusObservedGeneration = prometheus.NewDesc( - "kube_replicationcontroller_status_observed_generation", - "The generation observed by the ReplicationController controller.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerSpecReplicas = prometheus.NewDesc( - "kube_replicationcontroller_spec_replicas", - "Number of desired pods for a ReplicationController.", - []string{"namespace", "replicationcontroller"}, nil, - ) - descReplicationControllerMetadataGeneration = prometheus.NewDesc( - "kube_replicationcontroller_metadata_generation", - "Sequence number representing a specific generation of the desired state.", - []string{"namespace", "replicationcontroller"}, nil, - ) -) - -type ReplicationControllerLister func() ([]v1.ReplicationController, error) - -func (l ReplicationControllerLister) List() ([]v1.ReplicationController, error) { - return l() -} - -func RegisterReplicationControllerCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect replicationcontroller with %s", client.APIVersion()) - - rcinfs := NewSharedInformerList(client, "replicationcontrollers", namespaces, &v1.ReplicationController{}) - - replicationControllerLister := ReplicationControllerLister(func() (rcs []v1.ReplicationController, err error) { - for _, rcinf := range *rcinfs { - for _, c := range rcinf.GetStore().List() { - rcs = append(rcs, *(c.(*v1.ReplicationController))) - } - } - return rcs, nil - }) - - registry.MustRegister(&replicationcontrollerCollector{store: replicationControllerLister}) - rcinfs.Run(context.Background().Done()) -} - -type replicationcontrollerStore interface { - List() (replicationcontrollers []v1.ReplicationController, err error) -} - -type replicationcontrollerCollector struct { - store replicationcontrollerStore -} - -// Describe implements the prometheus.Collector interface. 
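// The ReplicationControllerLister above (like every XxxLister in this
// package) is the function-type adapter idiom: declaring a List method on
// a func type lets a plain closure over the informer caches satisfy the
// store interface without a dedicated struct. A self-contained sketch with
// hypothetical names:
//
//    package main
//
//    import "fmt"
//
//    type itemLister func() ([]string, error)
//
//    func (l itemLister) List() ([]string, error) { return l() }
//
//    func main() {
//        lister := itemLister(func() ([]string, error) {
//            return []string{"a", "b"}, nil // a real lister drains informer stores here
//        })
//        items, _ := lister.List()
//        fmt.Println(items) // [a b]
//    }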
-func (dc *replicationcontrollerCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descReplicationControllerCreated - ch <- descReplicationControllerStatusReplicas - ch <- descReplicationControllerStatusFullyLabeledReplicas - ch <- descReplicationControllerStatusReadyReplicas - ch <- descReplicationControllerStatusAvailableReplicas - ch <- descReplicationControllerStatusObservedGeneration - ch <- descReplicationControllerSpecReplicas - ch <- descReplicationControllerMetadataGeneration -} - -// Collect implements the prometheus.Collector interface. -func (dc *replicationcontrollerCollector) Collect(ch chan<- prometheus.Metric) { - rcs, err := dc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "replicationcontroller"}).Inc() - glog.Errorf("listing replicationcontrollers failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "replicationcontroller"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "replicationcontroller"}).Observe(float64(len(rcs))) - for _, d := range rcs { - dc.collectReplicationController(ch, d) - } - - glog.V(4).Infof("collected %d replicationcontrollers", len(rcs)) -} - -func (dc *replicationcontrollerCollector) collectReplicationController(ch chan<- prometheus.Metric, d v1.ReplicationController) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{d.Namespace, d.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - if !d.CreationTimestamp.IsZero() { - addGauge(descReplicationControllerCreated, float64(d.CreationTimestamp.Unix())) - } - addGauge(descReplicationControllerStatusReplicas, float64(d.Status.Replicas)) - addGauge(descReplicationControllerStatusFullyLabeledReplicas, float64(d.Status.FullyLabeledReplicas)) - addGauge(descReplicationControllerStatusReadyReplicas, float64(d.Status.ReadyReplicas)) - addGauge(descReplicationControllerStatusAvailableReplicas, float64(d.Status.AvailableReplicas)) - addGauge(descReplicationControllerStatusObservedGeneration, float64(d.Status.ObservedGeneration)) - if d.Spec.Replicas != nil { - addGauge(descReplicationControllerSpecReplicas, float64(*d.Spec.Replicas)) - } - addGauge(descReplicationControllerMetadataGeneration, float64(d.ObjectMeta.Generation)) -} diff --git a/collectors/replicationcontroller_test.go b/collectors/replicationcontroller_test.go deleted file mode 100644 index 8af76e3f3d..0000000000 --- a/collectors/replicationcontroller_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - rc1Replicas int32 = 5 - rc2Replicas int32 = 0 -) - -type mockReplicationControllerStore struct { - f func() ([]v1.ReplicationController, error) -} - -func (rs mockReplicationControllerStore) List() (replicationcontrollers []v1.ReplicationController, err error) { - return rs.f() -} - -func TestReplicationControllerCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_replicationcontroller_created Unix creation timestamp - # TYPE kube_replicationcontroller_created gauge - # HELP kube_replicationcontroller_metadata_generation Sequence number representing a specific generation of the desired state. - # TYPE kube_replicationcontroller_metadata_generation gauge - # HELP kube_replicationcontroller_status_replicas The number of replicas per ReplicationController. - # TYPE kube_replicationcontroller_status_replicas gauge - # HELP kube_replicationcontroller_status_fully_labeled_replicas The number of fully labeled replicas per ReplicationController. - # TYPE kube_replicationcontroller_status_fully_labeled_replicas gauge - # HELP kube_replicationcontroller_status_available_replicas The number of available replicas per ReplicationController. - # TYPE kube_replicationcontroller_status_available_replicas gauge - # HELP kube_replicationcontroller_status_ready_replicas The number of ready replicas per ReplicationController. - # TYPE kube_replicationcontroller_status_ready_replicas gauge - # HELP kube_replicationcontroller_status_observed_generation The generation observed by the ReplicationController controller. - # TYPE kube_replicationcontroller_status_observed_generation gauge - # HELP kube_replicationcontroller_spec_replicas Number of desired pods for a ReplicationController. 
- # TYPE kube_replicationcontroller_spec_replicas gauge - ` - cases := []struct { - rss []v1.ReplicationController - want string - }{ - { - rss: []v1.ReplicationController{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "rc1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - Generation: 21, - }, - Status: v1.ReplicationControllerStatus{ - Replicas: 5, - FullyLabeledReplicas: 10, - ReadyReplicas: 5, - AvailableReplicas: 3, - ObservedGeneration: 1, - }, - Spec: v1.ReplicationControllerSpec{ - Replicas: &rc1Replicas, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "rc2", - Namespace: "ns2", - Generation: 14, - }, - Status: v1.ReplicationControllerStatus{ - Replicas: 0, - FullyLabeledReplicas: 5, - ReadyReplicas: 0, - AvailableReplicas: 0, - ObservedGeneration: 5, - }, - Spec: v1.ReplicationControllerSpec{ - Replicas: &rc2Replicas, - }, - }, - }, - want: metadata + ` - kube_replicationcontroller_created{namespace="ns1",replicationcontroller="rc1"} 1.5e+09 - kube_replicationcontroller_metadata_generation{namespace="ns1",replicationcontroller="rc1"} 21 - kube_replicationcontroller_metadata_generation{namespace="ns2",replicationcontroller="rc2"} 14 - kube_replicationcontroller_status_replicas{namespace="ns1",replicationcontroller="rc1"} 5 - kube_replicationcontroller_status_replicas{namespace="ns2",replicationcontroller="rc2"} 0 - kube_replicationcontroller_status_observed_generation{namespace="ns1",replicationcontroller="rc1"} 1 - kube_replicationcontroller_status_observed_generation{namespace="ns2",replicationcontroller="rc2"} 5 - kube_replicationcontroller_status_fully_labeled_replicas{namespace="ns1",replicationcontroller="rc1"} 10 - kube_replicationcontroller_status_fully_labeled_replicas{namespace="ns2",replicationcontroller="rc2"} 5 - kube_replicationcontroller_status_ready_replicas{namespace="ns1",replicationcontroller="rc1"} 5 - kube_replicationcontroller_status_ready_replicas{namespace="ns2",replicationcontroller="rc2"} 0 - kube_replicationcontroller_status_available_replicas{namespace="ns1",replicationcontroller="rc1"} 3 - kube_replicationcontroller_status_available_replicas{namespace="ns2",replicationcontroller="rc2"} 0 - kube_replicationcontroller_spec_replicas{namespace="ns1",replicationcontroller="rc1"} 5 - kube_replicationcontroller_spec_replicas{namespace="ns2",replicationcontroller="rc2"} 0 - `, - }, - } - for _, c := range cases { - dc := &replicationcontrollerCollector{ - store: mockReplicationControllerStore{ - f: func() ([]v1.ReplicationController, error) { return c.rss, nil }, - }, - } - if err := gatherAndCompare(dc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/resourcequota.go b/collectors/resourcequota.go deleted file mode 100644 index 4cb864deb0..0000000000 --- a/collectors/resourcequota.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descResourceQuotaCreated = prometheus.NewDesc( - "kube_resourcequota_created", - "Unix creation timestamp", - []string{"resourcequota", "namespace"}, nil, - ) - descResourceQuota = prometheus.NewDesc( - "kube_resourcequota", - "Information about resource quota.", - []string{ - "resourcequota", - "namespace", - "resource", - "type", - }, nil, - ) -) - -type ResourceQuotaLister func() (v1.ResourceQuotaList, error) - -func (l ResourceQuotaLister) List() (v1.ResourceQuotaList, error) { - return l() -} - -func RegisterResourceQuotaCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect resourcequota with %s", client.APIVersion()) - - rqinfs := NewSharedInformerList(client, "resourcequotas", namespaces, &v1.ResourceQuota{}) - - resourceQuotaLister := ResourceQuotaLister(func() (quotas v1.ResourceQuotaList, err error) { - for _, rqinf := range *rqinfs { - for _, rq := range rqinf.GetStore().List() { - quotas.Items = append(quotas.Items, *(rq.(*v1.ResourceQuota))) - } - } - return quotas, nil - }) - - registry.MustRegister(&resourceQuotaCollector{store: resourceQuotaLister}) - rqinfs.Run(context.Background().Done()) -} - -type resourceQuotaStore interface { - List() (v1.ResourceQuotaList, error) -} - -// resourceQuotaCollector collects metrics about all resource quotas in the cluster. -type resourceQuotaCollector struct { - store resourceQuotaStore -} - -// Describe implements the prometheus.Collector interface. -func (rqc *resourceQuotaCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descResourceQuotaCreated - ch <- descResourceQuota -} - -// Collect implements the prometheus.Collector interface. -func (rqc *resourceQuotaCollector) Collect(ch chan<- prometheus.Metric) { - resourceQuota, err := rqc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "resourcequota"}).Inc() - glog.Errorf("listing resource quotas failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "resourcequota"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "resourcequota"}).Observe(float64(len(resourceQuota.Items))) - for _, rq := range resourceQuota.Items { - rqc.collectResourceQuota(ch, rq) - } - - glog.V(4).Infof("collected %d resourcequotas", len(resourceQuota.Items)) -} - -func (rqc *resourceQuotaCollector) collectResourceQuota(ch chan<- prometheus.Metric, rq v1.ResourceQuota) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{rq.Name, rq.Namespace}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) 
- } - - if !rq.CreationTimestamp.IsZero() { - addGauge(descResourceQuotaCreated, float64(rq.CreationTimestamp.Unix())) - } - for res, qty := range rq.Status.Hard { - addGauge(descResourceQuota, float64(qty.MilliValue())/1000, string(res), "hard") - } - for res, qty := range rq.Status.Used { - addGauge(descResourceQuota, float64(qty.MilliValue())/1000, string(res), "used") - } - -} diff --git a/collectors/resourcequota_test.go b/collectors/resourcequota_test.go deleted file mode 100644 index 3ecd29bb94..0000000000 --- a/collectors/resourcequota_test.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockResourceQuotaStore struct { - list func() (v1.ResourceQuotaList, error) -} - -func (ns mockResourceQuotaStore) List() (v1.ResourceQuotaList, error) { - return ns.list() -} - -func TestResourceQuotaCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_resourcequota Information about resource quota. - # TYPE kube_resourcequota gauge - # HELP kube_resourcequota_created Unix creation timestamp - # TYPE kube_resourcequota_created gauge - ` - cases := []struct { - quotas []v1.ResourceQuota - metrics []string // which metrics should be checked - want string - }{ - // Verify populating base metrics and that metrics for unset fields are skipped. - { - quotas: []v1.ResourceQuota{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "quotaTest", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "testNS", - }, - Status: v1.ResourceQuotaStatus{}, - }, - }, - want: metadata + ` - kube_resourcequota_created{namespace="testNS",resourcequota="quotaTest"} 1.5e+09 - `, - }, - // Verify resource metrics. 
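// The expected values in the next case follow directly from
// collectResourceQuota above: each quantity is exported as
// float64(qty.MilliValue()) / 1000, so fractional quantities such as "4.3"
// CPUs survive, and decimal-SI byte quantities such as "2.1G" come out as
// 2.1e+09. A self-contained illustration:
//
//    package main
//
//    import (
//        "fmt"
//
//        "k8s.io/apimachinery/pkg/api/resource"
//    )
//
//    func main() {
//        cpu := resource.MustParse("4.3")
//        fmt.Println(float64(cpu.MilliValue()) / 1000) // 4.3
//        mem := resource.MustParse("2.1G")
//        fmt.Println(float64(mem.MilliValue()) / 1000) // 2.1e+09
//    }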
- { - quotas: []v1.ResourceQuota{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "quotaTest", - Namespace: "testNS", - }, - Spec: v1.ResourceQuotaSpec{ - Hard: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("4.3"), - v1.ResourceMemory: resource.MustParse("2.1G"), - v1.ResourceStorage: resource.MustParse("10G"), - v1.ResourcePods: resource.MustParse("9"), - v1.ResourceServices: resource.MustParse("8"), - v1.ResourceReplicationControllers: resource.MustParse("7"), - v1.ResourceQuotas: resource.MustParse("6"), - v1.ResourceSecrets: resource.MustParse("5"), - v1.ResourceConfigMaps: resource.MustParse("4"), - v1.ResourcePersistentVolumeClaims: resource.MustParse("3"), - v1.ResourceServicesNodePorts: resource.MustParse("2"), - v1.ResourceServicesLoadBalancers: resource.MustParse("1"), - }, - }, - Status: v1.ResourceQuotaStatus{ - Hard: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("4.3"), - v1.ResourceMemory: resource.MustParse("2.1G"), - v1.ResourceStorage: resource.MustParse("10G"), - v1.ResourcePods: resource.MustParse("9"), - v1.ResourceServices: resource.MustParse("8"), - v1.ResourceReplicationControllers: resource.MustParse("7"), - v1.ResourceQuotas: resource.MustParse("6"), - v1.ResourceSecrets: resource.MustParse("5"), - v1.ResourceConfigMaps: resource.MustParse("4"), - v1.ResourcePersistentVolumeClaims: resource.MustParse("3"), - v1.ResourceServicesNodePorts: resource.MustParse("2"), - v1.ResourceServicesLoadBalancers: resource.MustParse("1"), - }, - Used: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2.1"), - v1.ResourceMemory: resource.MustParse("500M"), - v1.ResourceStorage: resource.MustParse("9G"), - v1.ResourcePods: resource.MustParse("8"), - v1.ResourceServices: resource.MustParse("7"), - v1.ResourceReplicationControllers: resource.MustParse("6"), - v1.ResourceQuotas: resource.MustParse("5"), - v1.ResourceSecrets: resource.MustParse("4"), - v1.ResourceConfigMaps: resource.MustParse("3"), - v1.ResourcePersistentVolumeClaims: resource.MustParse("2"), - v1.ResourceServicesNodePorts: resource.MustParse("1"), - v1.ResourceServicesLoadBalancers: resource.MustParse("0"), - }, - }, - }, - }, - want: metadata + ` - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="cpu",type="hard"} 4.3 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="cpu",type="used"} 2.1 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="memory",type="hard"} 2.1e+09 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="memory",type="used"} 5e+08 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="storage",type="hard"} 1e+10 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="storage",type="used"} 9e+09 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="pods",type="hard"} 9 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="pods",type="used"} 8 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services",type="hard"} 8 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services",type="used"} 7 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="replicationcontrollers",type="hard"} 7 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="replicationcontrollers",type="used"} 6 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="resourcequotas",type="hard"} 6 - 
kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="resourcequotas",type="used"} 5 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="secrets",type="hard"} 5 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="secrets",type="used"} 4 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="configmaps",type="hard"} 4 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="configmaps",type="used"} 3 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="persistentvolumeclaims",type="hard"} 3 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="persistentvolumeclaims",type="used"} 2 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services.nodeports",type="hard"} 2 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services.nodeports",type="used"} 1 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services.loadbalancers",type="hard"} 1 - kube_resourcequota{resourcequota="quotaTest",namespace="testNS",resource="services.loadbalancers",type="used"} 0 - `, - }, - } - for _, c := range cases { - dc := &resourceQuotaCollector{ - store: &mockResourceQuotaStore{ - list: func() (v1.ResourceQuotaList, error) { - return v1.ResourceQuotaList{Items: c.quotas}, nil - }, - }, - } - if err := gatherAndCompare(dc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/secret.go b/collectors/secret.go deleted file mode 100644 index 205f054581..0000000000 --- a/collectors/secret.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descSecretLabelsName = "kube_secret_labels" - descSecretLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descSecretLabelsDefaultLabels = []string{"namespace", "secret"} - - descSecretInfo = prometheus.NewDesc( - "kube_secret_info", - "Information about secret.", - []string{"namespace", "secret"}, nil, - ) - - descSecretType = prometheus.NewDesc( - "kube_secret_type", - "Type about secret.", - []string{"namespace", "secret", "type"}, nil, - ) - - descSecretLabels = prometheus.NewDesc( - descSecretLabelsName, - descSecretLabelsHelp, - descSecretLabelsDefaultLabels, nil, - ) - - descSecretCreated = prometheus.NewDesc( - "kube_secret_created", - "Unix creation timestamp", - []string{"namespace", "secret"}, nil, - ) - - descSecretMetadataResourceVersion = prometheus.NewDesc( - "kube_secret_metadata_resource_version", - "Resource version representing a specific version of secret.", - []string{"namespace", "secret", "resource_version"}, nil, - ) -) - -type SecretLister func() ([]v1.Secret, error) - -func (l SecretLister) List() ([]v1.Secret, error) { - return l() -} - -func RegisterSecretCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect secret with %s", client.APIVersion()) - - sinfs := NewSharedInformerList(client, "secrets", namespaces, &v1.Secret{}) - - secretLister := SecretLister(func() (secrets []v1.Secret, err error) { - for _, sinf := range *sinfs { - for _, m := range sinf.GetStore().List() { - secrets = append(secrets, *m.(*v1.Secret)) - } - } - return secrets, nil - }) - - registry.MustRegister(&secretCollector{store: secretLister}) - sinfs.Run(context.Background().Done()) -} - -type secretStore interface { - List() (secrets []v1.Secret, err error) -} - -// secretCollector collects metrics about all secrets in the cluster. -type secretCollector struct { - store secretStore -} - -// Describe implements the prometheus.Collector interface. -func (sc *secretCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descSecretInfo - ch <- descSecretCreated - ch <- descSecretLabels - ch <- descSecretMetadataResourceVersion - ch <- descSecretType -} - -// Collect implements the prometheus.Collector interface. -func (sc *secretCollector) Collect(ch chan<- prometheus.Metric) { - secrets, err := sc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "secret"}).Inc() - glog.Errorf("listing secrets failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "secret"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "secret"}).Observe(float64(len(secrets))) - for _, s := range secrets { - sc.collectSecret(ch, s) - } - - glog.V(4).Infof("collected %d secrets", len(secrets)) -} - -func secretLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descSecretLabelsName, - descSecretLabelsHelp, - append(descSecretLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (sc *secretCollector) collectSecret(ch chan<- prometheus.Metric, s v1.Secret) { - addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { - lv = append([]string{s.Namespace, s.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, t, v, lv...) - } - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.GaugeValue, v, lv...) 
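// secretLabelsDesc above exists because kube_secret_labels has a dynamic
// label set: the object's own label keys are appended to the default
// labels, so every distinct key set needs its own Desc. Judging by the
// test fixtures, kubeLabelsToPrometheusLabels prefixes each key with
// "label_" and rewrites characters that are invalid in a Prometheus label
// name; a rough sketch of that sanitization under those assumptions (the
// real helper lives elsewhere in this package):
//
//    var invalidLabelChar = regexp.MustCompile(`[^a-zA-Z0-9_]`)
//
//    func sanitizeLabelName(k string) string {
//        return "label_" + invalidLabelChar.ReplaceAllString(k, "_")
//    }
//
//    // sanitizeLabelName("test-3") == "label_test_3"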
- } - addGauge(descSecretInfo, 1) - - addGauge(descSecretType, 1, string(s.Type)) - if !s.CreationTimestamp.IsZero() { - addGauge(descSecretCreated, float64(s.CreationTimestamp.Unix())) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(s.Labels) - addGauge(secretLabelsDesc(labelKeys), 1, labelValues...) - - addGauge(descSecretMetadataResourceVersion, 1, string(s.ObjectMeta.ResourceVersion)) -} diff --git a/collectors/secret_test.go b/collectors/secret_test.go deleted file mode 100644 index 742dd85187..0000000000 --- a/collectors/secret_test.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockSecretStore struct { - f func() ([]v1.Secret, error) -} - -func (ss mockSecretStore) List() (secrets []v1.Secret, err error) { - return ss.f() -} - -func TestSecretCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - - startTime := 1501569018 - metav1StartTime := metav1.Unix(int64(startTime), 0) - - const metadata = ` - # HELP kube_secret_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_secret_labels gauge - # HELP kube_secret_info Information about secret. - # TYPE kube_secret_info gauge - # HELP kube_secret_type Type about secret. - # TYPE kube_secret_type gauge - # HELP kube_secret_created Unix creation timestamp - # TYPE kube_secret_created gauge - # HELP kube_secret_metadata_resource_version Resource version representing a specific version of secret. 
- # TYPE kube_secret_metadata_resource_version gauge - ` - cases := []struct { - secrets []v1.Secret - metrics []string - want string - }{ - { - secrets: []v1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret1", - Namespace: "ns1", - ResourceVersion: "000000", - }, - Type: v1.SecretTypeOpaque, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret2", - Namespace: "ns2", - CreationTimestamp: metav1StartTime, - ResourceVersion: "123456", - }, - Type: v1.SecretTypeServiceAccountToken, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret3", - Namespace: "ns3", - CreationTimestamp: metav1StartTime, - Labels: map[string]string{"test-3": "test-3"}, - ResourceVersion: "abcdef", - }, - Type: v1.SecretTypeDockercfg, - }, - }, - want: metadata + ` - kube_secret_info{secret="secret1",namespace="ns1"} 1 - kube_secret_info{secret="secret2",namespace="ns2"} 1 - kube_secret_info{secret="secret3",namespace="ns3"} 1 - kube_secret_type{secret="secret1",namespace="ns1",type="Opaque"} 1 - kube_secret_type{secret="secret2",namespace="ns2",type="kubernetes.io/service-account-token"} 1 - kube_secret_type{secret="secret3",namespace="ns3",type="kubernetes.io/dockercfg"} 1 - kube_secret_created{secret="secret2",namespace="ns2"} 1.501569018e+09 - kube_secret_created{secret="secret3",namespace="ns3"} 1.501569018e+09 - kube_secret_metadata_resource_version{secret="secret1",namespace="ns1",resource_version="000000"} 1 - kube_secret_metadata_resource_version{secret="secret2",namespace="ns2",resource_version="123456"} 1 - kube_secret_metadata_resource_version{secret="secret3",namespace="ns3",resource_version="abcdef"} 1 - kube_secret_labels{secret="secret3",namespace="ns3",label_test_3="test-3"} 1 - kube_secret_labels{secret="secret2",namespace="ns2"} 1 - kube_secret_labels{secret="secret1",namespace="ns1"} 1 - `, - metrics: []string{"kube_secret_info", "kube_secret_metadata_resource_version", "kube_secret_created", "kube_secret_labels", "kube_secret_type"}, - }, - } - for _, c := range cases { - sc := &secretCollector{ - store: mockSecretStore{ - f: func() ([]v1.Secret, error) { return c.secrets, nil }, - }, - } - if err := gatherAndCompare(sc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/service.go b/collectors/service.go deleted file mode 100644 index b90901fee7..0000000000 --- a/collectors/service.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" -) - -var ( - descServiceLabelsName = "kube_service_labels" - descServiceLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descServiceLabelsDefaultLabels = []string{"namespace", "service"} - - descServiceInfo = prometheus.NewDesc( - "kube_service_info", - "Information about service.", - []string{"namespace", "service", "cluster_ip"}, nil, - ) - - descServiceCreated = prometheus.NewDesc( - "kube_service_created", - "Unix creation timestamp", - []string{"namespace", "service"}, nil, - ) - - descServiceSpecType = prometheus.NewDesc( - "kube_service_spec_type", - "Type about service.", - []string{"namespace", "service", "type"}, nil, - ) - - descServiceLabels = prometheus.NewDesc( - descServiceLabelsName, - descServiceLabelsHelp, - descServiceLabelsDefaultLabels, nil, - ) -) - -type ServiceLister func() ([]v1.Service, error) - -func (l ServiceLister) List() ([]v1.Service, error) { - return l() -} - -func RegisterServiceCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.CoreV1().RESTClient() - glog.Infof("collect service with %s", client.APIVersion()) - - sinfs := NewSharedInformerList(client, "services", namespaces, &v1.Service{}) - - serviceLister := ServiceLister(func() (services []v1.Service, err error) { - for _, sinf := range *sinfs { - for _, m := range sinf.GetStore().List() { - services = append(services, *m.(*v1.Service)) - } - } - return services, nil - }) - - registry.MustRegister(&serviceCollector{store: serviceLister}) - sinfs.Run(context.Background().Done()) -} - -type serviceStore interface { - List() (services []v1.Service, err error) -} - -// serviceCollector collects metrics about all services in the cluster. -type serviceCollector struct { - store serviceStore -} - -// Describe implements the prometheus.Collector interface. -func (pc *serviceCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descServiceInfo - ch <- descServiceLabels - ch <- descServiceCreated - ch <- descServiceSpecType -} - -// Collect implements the prometheus.Collector interface. -func (sc *serviceCollector) Collect(ch chan<- prometheus.Metric) { - services, err := sc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "service"}).Inc() - glog.Errorf("listing services failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "service"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "service"}).Observe(float64(len(services))) - for _, s := range services { - sc.collectService(ch, s) - } - glog.V(4).Infof("collected %d services", len(services)) -} - -func serviceLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descServiceLabelsName, - descServiceLabelsHelp, - append(descServiceLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (sc *serviceCollector) collectService(ch chan<- prometheus.Metric, s v1.Service) { - addConstMetric := func(desc *prometheus.Desc, t prometheus.ValueType, v float64, lv ...string) { - lv = append([]string{s.Namespace, s.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, t, v, lv...) - } - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - addConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - addGauge(descServiceSpecType, 1, string(s.Spec.Type)) - - addGauge(descServiceInfo, 1, s.Spec.ClusterIP) - if !s.CreationTimestamp.IsZero() { - addGauge(descServiceCreated, float64(s.CreationTimestamp.Unix())) - } - labelKeys, labelValues := kubeLabelsToPrometheusLabels(s.Labels) - addGauge(serviceLabelsDesc(labelKeys), 1, labelValues...) 
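// kube_service_info is an "info" metric: its value is a constant 1 and the
// identifying detail (here the cluster IP) travels in labels, so other
// kube_service_* series can be enriched by joining on namespace and
// service at query time. The addGauge(descServiceInfo, 1, ...) call above
// expands to roughly:
//
//    ch <- prometheus.MustNewConstMetric(
//        descServiceInfo, prometheus.GaugeValue, 1,
//        s.Namespace, s.Name, s.Spec.ClusterIP)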
-} diff --git a/collectors/service_test.go b/collectors/service_test.go deleted file mode 100644 index 85824d03b4..0000000000 --- a/collectors/service_test.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type mockServiceStore struct { - list func() ([]v1.Service, error) -} - -func (ss mockServiceStore) List() ([]v1.Service, error) { - return ss.list() -} - -func TestServiceCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_service_info Information about service. - # TYPE kube_service_info gauge - # HELP kube_service_created Unix creation timestamp - # TYPE kube_service_created gauge - # HELP kube_service_labels Kubernetes labels converted to Prometheus labels. - # TYPE kube_service_labels gauge - # HELP kube_service_spec_type Type about service. - # TYPE kube_service_spec_type gauge - ` - cases := []struct { - services []v1.Service - metrics []string // which metrics should be checked - want string - }{ - { - services: []v1.Service{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-service1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "default", - Labels: map[string]string{ - "app": "example1", - }, - }, - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.4", - Type: v1.ServiceTypeClusterIP, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-service2", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "default", - Labels: map[string]string{ - "app": "example2", - }, - }, - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.5", - Type: v1.ServiceTypeNodePort, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-service3", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "default", - Labels: map[string]string{ - "app": "example3", - }, - }, - Spec: v1.ServiceSpec{ - ClusterIP: "1.2.3.6", - Type: v1.ServiceTypeLoadBalancer, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-service4", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "default", - Labels: map[string]string{ - "app": "example4", - }, - }, - Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeExternalName, - }, - }, - }, - want: metadata + ` - kube_service_created{namespace="default",service="test-service1"} 1.5e+09 - kube_service_created{namespace="default",service="test-service2"} 1.5e+09 - kube_service_created{namespace="default",service="test-service3"} 1.5e+09 - kube_service_created{namespace="default",service="test-service4"} 1.5e+09 - kube_service_info{cluster_ip="",namespace="default",service="test-service4"} 1 - kube_service_info{cluster_ip="1.2.3.4",namespace="default",service="test-service1"} 1 - 
kube_service_info{cluster_ip="1.2.3.5",namespace="default",service="test-service2"} 1 - kube_service_info{cluster_ip="1.2.3.6",namespace="default",service="test-service3"} 1 - kube_service_labels{label_app="example1",namespace="default",service="test-service1"} 1 - kube_service_labels{label_app="example2",namespace="default",service="test-service2"} 1 - kube_service_labels{label_app="example3",namespace="default",service="test-service3"} 1 - kube_service_labels{label_app="example4",namespace="default",service="test-service4"} 1 - kube_service_spec_type{namespace="default",service="test-service1",type="ClusterIP"} 1 - kube_service_spec_type{namespace="default",service="test-service2",type="NodePort"} 1 - kube_service_spec_type{namespace="default",service="test-service3",type="LoadBalancer"} 1 - kube_service_spec_type{namespace="default",service="test-service4",type="ExternalName"} 1 - `, - }, - } - for _, c := range cases { - sc := &serviceCollector{ - store: &mockServiceStore{ - list: func() ([]v1.Service, error) { - return c.services, nil - }, - }, - } - if err := gatherAndCompare(sc, c.want, c.metrics); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/collectors/statefulset.go b/collectors/statefulset.go deleted file mode 100644 index ffa6ae2f7d..0000000000 --- a/collectors/statefulset.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package collectors - -import ( - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "k8s.io/api/apps/v1beta1" - "k8s.io/client-go/kubernetes" -) - -var ( - descStatefulSetLabelsName = "kube_statefulset_labels" - descStatefulSetLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
- descStatefulSetLabelsDefaultLabels = []string{"namespace", "statefulset"} - - descStatefulSetCreated = prometheus.NewDesc( - "kube_statefulset_created", - "Unix creation timestamp", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetStatusReplicas = prometheus.NewDesc( - "kube_statefulset_status_replicas", - "The number of replicas per StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetStatusReplicasCurrent = prometheus.NewDesc( - "kube_statefulset_status_replicas_current", - "The number of current replicas per StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetStatusReplicasReady = prometheus.NewDesc( - "kube_statefulset_status_replicas_ready", - "The number of ready replicas per StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetStatusReplicasUpdated = prometheus.NewDesc( - "kube_statefulset_status_replicas_updated", - "The number of updated replicas per StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetStatusObservedGeneration = prometheus.NewDesc( - "kube_statefulset_status_observed_generation", - "The generation observed by the StatefulSet controller.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetSpecReplicas = prometheus.NewDesc( - "kube_statefulset_replicas", - "Number of desired pods for a StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetMetadataGeneration = prometheus.NewDesc( - "kube_statefulset_metadata_generation", - "Sequence number representing a specific generation of the desired state for the StatefulSet.", - []string{"namespace", "statefulset"}, nil, - ) - - descStatefulSetLabels = prometheus.NewDesc( - descStatefulSetLabelsName, - descStatefulSetLabelsHelp, - descStatefulSetLabelsDefaultLabels, nil, - ) -) - -type StatefulSetLister func() ([]v1beta1.StatefulSet, error) - -func (l StatefulSetLister) List() ([]v1beta1.StatefulSet, error) { - return l() -} - -func RegisterStatefulSetCollector(registry prometheus.Registerer, kubeClient kubernetes.Interface, namespaces []string) { - client := kubeClient.AppsV1beta1().RESTClient() - glog.Infof("collect statefulset with %s", client.APIVersion()) - - dinfs := NewSharedInformerList(client, "statefulsets", namespaces, &v1beta1.StatefulSet{}) - - statefulSetLister := StatefulSetLister(func() (statefulSets []v1beta1.StatefulSet, err error) { - for _, dinf := range *dinfs { - for _, c := range dinf.GetStore().List() { - statefulSets = append(statefulSets, *(c.(*v1beta1.StatefulSet))) - } - } - return statefulSets, nil - }) - - registry.MustRegister(&statefulSetCollector{store: statefulSetLister}) - dinfs.Run(context.Background().Done()) -} - -type statefulSetStore interface { - List() (statefulSets []v1beta1.StatefulSet, err error) -} - -type statefulSetCollector struct { - store statefulSetStore -} - -// Describe implements the prometheus.Collector interface. -func (dc *statefulSetCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- descStatefulSetCreated - ch <- descStatefulSetStatusReplicas - ch <- descStatefulSetStatusReplicasCurrent - ch <- descStatefulSetStatusReplicasReady - ch <- descStatefulSetStatusReplicasUpdated - ch <- descStatefulSetStatusObservedGeneration - ch <- descStatefulSetSpecReplicas - ch <- descStatefulSetMetadataGeneration - ch <- descStatefulSetLabels -} - -// Collect implements the prometheus.Collector interface. 
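// Several StatefulSet fields are optional and therefore pointers
// (Spec.Replicas *int32, Status.ObservedGeneration *int64), so
// collectStatefulSet below only emits the matching gauges when a field is
// set, just as creation timestamps are skipped while zero. The guard looks
// like:
//
//    if statefulSet.Spec.Replicas != nil {
//        addGauge(descStatefulSetSpecReplicas, float64(*statefulSet.Spec.Replicas))
//    }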
-func (sc *statefulSetCollector) Collect(ch chan<- prometheus.Metric) { - sss, err := sc.store.List() - if err != nil { - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "statefulset"}).Inc() - glog.Errorf("listing statefulsets failed: %s", err) - return - } - ScrapeErrorTotalMetric.With(prometheus.Labels{"resource": "statefulset"}).Add(0) - - ResourcesPerScrapeMetric.With(prometheus.Labels{"resource": "statefulset"}).Observe(float64(len(sss))) - for _, d := range sss { - sc.collectStatefulSet(ch, d) - } - - glog.V(4).Infof("collected %d statefulsets", len(sss)) -} - -func statefulSetLabelsDesc(labelKeys []string) *prometheus.Desc { - return prometheus.NewDesc( - descStatefulSetLabelsName, - descStatefulSetLabelsHelp, - append(descStatefulSetLabelsDefaultLabels, labelKeys...), - nil, - ) -} - -func (dc *statefulSetCollector) collectStatefulSet(ch chan<- prometheus.Metric, statefulSet v1beta1.StatefulSet) { - addGauge := func(desc *prometheus.Desc, v float64, lv ...string) { - lv = append([]string{statefulSet.Namespace, statefulSet.Name}, lv...) - ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, lv...) - } - if !statefulSet.CreationTimestamp.IsZero() { - addGauge(descStatefulSetCreated, float64(statefulSet.CreationTimestamp.Unix())) - } - addGauge(descStatefulSetStatusReplicas, float64(statefulSet.Status.Replicas)) - addGauge(descStatefulSetStatusReplicasCurrent, float64(statefulSet.Status.CurrentReplicas)) - addGauge(descStatefulSetStatusReplicasReady, float64(statefulSet.Status.ReadyReplicas)) - addGauge(descStatefulSetStatusReplicasUpdated, float64(statefulSet.Status.UpdatedReplicas)) - if statefulSet.Status.ObservedGeneration != nil { - addGauge(descStatefulSetStatusObservedGeneration, float64(*statefulSet.Status.ObservedGeneration)) - } - - if statefulSet.Spec.Replicas != nil { - addGauge(descStatefulSetSpecReplicas, float64(*statefulSet.Spec.Replicas)) - } - addGauge(descStatefulSetMetadataGeneration, float64(statefulSet.ObjectMeta.Generation)) - - labelKeys, labelValues := kubeLabelsToPrometheusLabels(statefulSet.Labels) - addGauge(statefulSetLabelsDesc(labelKeys), 1, labelValues...) -} diff --git a/collectors/statefulset_test.go b/collectors/statefulset_test.go deleted file mode 100644 index 9df3885d7a..0000000000 --- a/collectors/statefulset_test.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package collectors - -import ( - "testing" - "time" - - "k8s.io/api/apps/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - statefulSet1Replicas int32 = 3 - statefulSet2Replicas int32 = 6 - statefulSet3Replicas int32 = 9 - - statefulSet1ObservedGeneration int64 = 1 - statefulSet2ObservedGeneration int64 = 2 -) - -type mockStatefulSetStore struct { - f func() ([]v1beta1.StatefulSet, error) -} - -func (ds mockStatefulSetStore) List() (deployments []v1beta1.StatefulSet, err error) { - return ds.f() -} - -func TestStatefuleSetCollector(t *testing.T) { - // Fixed metadata on type and help text. We prepend this to every expected - // output so we only have to modify a single place when doing adjustments. - const metadata = ` - # HELP kube_statefulset_created Unix creation timestamp - # TYPE kube_statefulset_created gauge - # HELP kube_statefulset_status_replicas The number of replicas per StatefulSet. - # TYPE kube_statefulset_status_replicas gauge - # HELP kube_statefulset_status_replicas_current The number of current replicas per StatefulSet. - # TYPE kube_statefulset_status_replicas_current gauge - # HELP kube_statefulset_status_replicas_ready The number of ready replicas per StatefulSet. - # TYPE kube_statefulset_status_replicas_ready gauge - # HELP kube_statefulset_status_replicas_updated The number of updated replicas per StatefulSet. - # TYPE kube_statefulset_status_replicas_updated gauge - # HELP kube_statefulset_status_observed_generation The generation observed by the StatefulSet controller. - # TYPE kube_statefulset_status_observed_generation gauge - # HELP kube_statefulset_replicas Number of desired pods for a StatefulSet. - # TYPE kube_statefulset_replicas gauge - # HELP kube_statefulset_metadata_generation Sequence number representing a specific generation of the desired state for the StatefulSet. - # TYPE kube_statefulset_metadata_generation gauge - # HELP kube_statefulset_labels Kubernetes labels converted to Prometheus labels. 
- # TYPE kube_statefulset_labels gauge - ` - cases := []struct { - depls []v1beta1.StatefulSet - want string - }{ - { - depls: []v1beta1.StatefulSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "statefulset1", - CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, - Namespace: "ns1", - Labels: map[string]string{ - "app": "example1", - }, - Generation: 3, - }, - Spec: v1beta1.StatefulSetSpec{ - Replicas: &statefulSet1Replicas, - ServiceName: "statefulset1service", - }, - Status: v1beta1.StatefulSetStatus{ - ObservedGeneration: &statefulSet1ObservedGeneration, - Replicas: 2, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "statefulset2", - Namespace: "ns2", - Labels: map[string]string{ - "app": "example2", - }, - Generation: 21, - }, - Spec: v1beta1.StatefulSetSpec{ - Replicas: &statefulSet2Replicas, - ServiceName: "statefulset2service", - }, - Status: v1beta1.StatefulSetStatus{ - CurrentReplicas: 2, - ObservedGeneration: &statefulSet2ObservedGeneration, - ReadyReplicas: 5, - Replicas: 5, - UpdatedReplicas: 3, - }, - }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "statefulset3", - Namespace: "ns3", - Labels: map[string]string{ - "app": "example3", - }, - Generation: 36, - }, - Spec: v1beta1.StatefulSetSpec{ - Replicas: &statefulSet3Replicas, - ServiceName: "statefulset2service", - }, - Status: v1beta1.StatefulSetStatus{ - ObservedGeneration: nil, - Replicas: 7, - }, - }, - }, - want: metadata + ` - kube_statefulset_created{namespace="ns1",statefulset="statefulset1"} 1.5e+09 - kube_statefulset_status_replicas{namespace="ns1",statefulset="statefulset1"} 2 - kube_statefulset_status_replicas{namespace="ns2",statefulset="statefulset2"} 5 - kube_statefulset_status_replicas{namespace="ns3",statefulset="statefulset3"} 7 - kube_statefulset_status_replicas_current{namespace="ns1",statefulset="statefulset1"} 0 - kube_statefulset_status_replicas_current{namespace="ns2",statefulset="statefulset2"} 2 - kube_statefulset_status_replicas_current{namespace="ns3",statefulset="statefulset3"} 0 - kube_statefulset_status_replicas_ready{namespace="ns1",statefulset="statefulset1"} 0 - kube_statefulset_status_replicas_ready{namespace="ns2",statefulset="statefulset2"} 5 - kube_statefulset_status_replicas_ready{namespace="ns3",statefulset="statefulset3"} 0 - kube_statefulset_status_replicas_updated{namespace="ns1",statefulset="statefulset1"} 0 - kube_statefulset_status_replicas_updated{namespace="ns2",statefulset="statefulset2"} 3 - kube_statefulset_status_replicas_updated{namespace="ns3",statefulset="statefulset3"} 0 - kube_statefulset_status_observed_generation{namespace="ns1",statefulset="statefulset1"} 1 - kube_statefulset_status_observed_generation{namespace="ns2",statefulset="statefulset2"} 2 - kube_statefulset_replicas{namespace="ns1",statefulset="statefulset1"} 3 - kube_statefulset_replicas{namespace="ns2",statefulset="statefulset2"} 6 - kube_statefulset_replicas{namespace="ns3",statefulset="statefulset3"} 9 - kube_statefulset_metadata_generation{namespace="ns1",statefulset="statefulset1"} 3 - kube_statefulset_metadata_generation{namespace="ns2",statefulset="statefulset2"} 21 - kube_statefulset_metadata_generation{namespace="ns3",statefulset="statefulset3"} 36 - kube_statefulset_labels{label_app="example1",namespace="ns1",statefulset="statefulset1"} 1 - kube_statefulset_labels{label_app="example2",namespace="ns2",statefulset="statefulset2"} 1 - kube_statefulset_labels{label_app="example3",namespace="ns3",statefulset="statefulset3"} 1 - `, - }, - } - for _, c := range cases { - sc := 
&statefulSetCollector{ - store: mockStatefulSetStore{ - f: func() ([]v1beta1.StatefulSet, error) { return c.depls, nil }, - }, - } - if err := gatherAndCompare(sc, c.want, nil); err != nil { - t.Errorf("unexpected collecting result:\n%s", err) - } - } -} diff --git a/kubernetes/kube-state-metrics-cluster-role-binding.yaml b/kubernetes/kube-state-metrics-cluster-role-binding.yaml index 9c19c2faca..4c0bade85c 100644 --- a/kubernetes/kube-state-metrics-cluster-role-binding.yaml +++ b/kubernetes/kube-state-metrics-cluster-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1 +apiVersion: rbac.authorization.k8s.io/v1 # kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: @@ -11,4 +11,3 @@ subjects: - kind: ServiceAccount name: kube-state-metrics namespace: kube-system - diff --git a/kubernetes/kube-state-metrics-cluster-role.yaml b/kubernetes/kube-state-metrics-cluster-role.yaml index f9dbb96993..fe227b0b69 100644 --- a/kubernetes/kube-state-metrics-cluster-role.yaml +++ b/kubernetes/kube-state-metrics-cluster-role.yaml @@ -38,3 +38,7 @@ rules: resources: - horizontalpodautoscalers verbs: ["list", "watch"] +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] diff --git a/kubernetes/kube-state-metrics-deployment.yaml b/kubernetes/kube-state-metrics-deployment.yaml index 57d801b65d..734071e6ed 100644 --- a/kubernetes/kube-state-metrics-deployment.yaml +++ b/kubernetes/kube-state-metrics-deployment.yaml @@ -18,7 +18,7 @@ spec: serviceAccountName: kube-state-metrics containers: - name: kube-state-metrics - image: quay.io/coreos/kube-state-metrics:v1.3.0 + image: quay.io/coreos/kube-state-metrics:v1.5.0 ports: - name: http-metrics containerPort: 8080 @@ -31,14 +31,14 @@ spec: initialDelaySeconds: 5 timeoutSeconds: 5 - name: addon-resizer - image: k8s.gcr.io/addon-resizer:1.7 + image: k8s.gcr.io/addon-resizer:1.8.3 resources: limits: - cpu: 100m - memory: 30Mi + cpu: 150m + memory: 50Mi requests: - cpu: 100m - memory: 30Mi + cpu: 150m + memory: 50Mi env: - name: MY_POD_NAME valueFrom: diff --git a/kubernetes/kube-state-metrics-role-binding.yaml b/kubernetes/kube-state-metrics-role-binding.yaml index 53e559291a..517d2c859d 100644 --- a/kubernetes/kube-state-metrics-role-binding.yaml +++ b/kubernetes/kube-state-metrics-role-binding.yaml @@ -12,4 +12,3 @@ subjects: - kind: ServiceAccount name: kube-state-metrics namespace: kube-system - diff --git a/kubernetes/kube-state-metrics-role.yaml b/kubernetes/kube-state-metrics-role.yaml index f30fcaff53..725c526075 100644 --- a/kubernetes/kube-state-metrics-role.yaml +++ b/kubernetes/kube-state-metrics-role.yaml @@ -14,4 +14,3 @@ rules: - deployments resourceNames: ["kube-state-metrics"] verbs: ["get", "update"] - diff --git a/kubernetes/kube-state-metrics-service-account.yaml b/kubernetes/kube-state-metrics-service-account.yaml index ab22e8d97c..577153b254 100644 --- a/kubernetes/kube-state-metrics-service-account.yaml +++ b/kubernetes/kube-state-metrics-service-account.yaml @@ -3,4 +3,3 @@ kind: ServiceAccount metadata: name: kube-state-metrics namespace: kube-system - diff --git a/kubernetes/kube-state-metrics-service.yaml b/kubernetes/kube-state-metrics-service.yaml index 4478ca8dd8..b39f241cb7 100644 --- a/kubernetes/kube-state-metrics-service.yaml +++ b/kubernetes/kube-state-metrics-service.yaml @@ -19,4 +19,3 @@ spec: protocol: TCP selector: k8s-app: kube-state-metrics - diff --git a/main.go b/main.go index cedb69160e..384b2af948 100644 
--- a/main.go +++ b/main.go @@ -17,14 +17,15 @@ limitations under the License. package main import ( - "flag" + "compress/gzip" + "context" "fmt" + "io" "log" "net" "net/http" "net/http/pprof" "os" - "sort" "strconv" "strings" @@ -32,13 +33,14 @@ import ( "github.com/openshift/origin/pkg/util/proc" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/spf13/pflag" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/tools/clientcmd" - kcollectors "k8s.io/kube-state-metrics/collectors" - "k8s.io/kube-state-metrics/version" + kcollectors "k8s.io/kube-state-metrics/pkg/collectors" + "k8s.io/kube-state-metrics/pkg/options" + "k8s.io/kube-state-metrics/pkg/version" + "k8s.io/kube-state-metrics/pkg/whiteblacklist" ) const ( @@ -46,215 +48,101 @@ const ( healthzPath = "/healthz" ) -var ( - defaultNamespaces = namespaceList{metav1.NamespaceAll} - defaultCollectors = collectorSet{ - "daemonsets": struct{}{}, - "deployments": struct{}{}, - "limitranges": struct{}{}, - "nodes": struct{}{}, - "pods": struct{}{}, - "replicasets": struct{}{}, - "replicationcontrollers": struct{}{}, - "resourcequotas": struct{}{}, - "services": struct{}{}, - "jobs": struct{}{}, - "cronjobs": struct{}{}, - "statefulsets": struct{}{}, - "persistentvolumes": struct{}{}, - "persistentvolumeclaims": struct{}{}, - "namespaces": struct{}{}, - "horizontalpodautoscalers": struct{}{}, - "endpoints": struct{}{}, - "secrets": struct{}{}, - "configmaps": struct{}{}, - } - availableCollectors = map[string]func(registry prometheus.Registerer, kubeClient clientset.Interface, namespaces []string){ - "cronjobs": kcollectors.RegisterCronJobCollector, - "daemonsets": kcollectors.RegisterDaemonSetCollector, - "deployments": kcollectors.RegisterDeploymentCollector, - "jobs": kcollectors.RegisterJobCollector, - "limitranges": kcollectors.RegisterLimitRangeCollector, - "nodes": kcollectors.RegisterNodeCollector, - "pods": kcollectors.RegisterPodCollector, - "replicasets": kcollectors.RegisterReplicaSetCollector, - "replicationcontrollers": kcollectors.RegisterReplicationControllerCollector, - "resourcequotas": kcollectors.RegisterResourceQuotaCollector, - "services": kcollectors.RegisterServiceCollector, - "statefulsets": kcollectors.RegisterStatefulSetCollector, - "persistentvolumes": kcollectors.RegisterPersistentVolumeCollector, - "persistentvolumeclaims": kcollectors.RegisterPersistentVolumeClaimCollector, - "namespaces": kcollectors.RegisterNamespaceCollector, - "horizontalpodautoscalers": kcollectors.RegisterHorizontalPodAutoScalerCollector, - "endpoints": kcollectors.RegisterEndpointCollector, - "secrets": kcollectors.RegisterSecretCollector, - "configmaps": kcollectors.RegisterConfigMapCollector, - } -) - // promLogger implements promhttp.Logger type promLogger struct{} func (pl promLogger) Println(v ...interface{}) { - glog.Error(v) -} - -type collectorSet map[string]struct{} - -func (c *collectorSet) String() string { - s := *c - ss := s.asSlice() - sort.Strings(ss) - return strings.Join(ss, ",") -} - -func (c *collectorSet) Set(value string) error { - s := *c - cols := strings.Split(value, ",") - for _, col := range cols { - col = strings.TrimSpace(col) - if len(col) != 0 { - _, ok := availableCollectors[col] - if !ok { - glog.Fatalf("Collector \"%s\" does not exist", col) - } - s[col] = struct{}{} - } - } - return nil -} - -func (c collectorSet) asSlice() []string { - 
cols := []string{} - for col := range c { - cols = append(cols, col) - } - return cols -} - -func (c collectorSet) isEmpty() bool { - return len(c.asSlice()) == 0 -} - -func (c *collectorSet) Type() string { - return "string" -} - -type namespaceList []string - -func (n *namespaceList) String() string { - return strings.Join(*n, ",") -} - -func (n *namespaceList) IsAllNamespaces() bool { - return len(*n) == 1 && (*n)[0] == metav1.NamespaceAll -} - -func (n *namespaceList) Set(value string) error { - splittedNamespaces := strings.Split(value, ",") - for _, ns := range splittedNamespaces { - ns = strings.TrimSpace(ns) - if len(ns) != 0 { - *n = append(*n, ns) - } - } - return nil -} - -func (n *namespaceList) Type() string { - return "string" -} - -type options struct { - apiserver string - kubeconfig string - help bool - port int - host string - telemetryPort int - telemetryHost string - collectors collectorSet - namespaces namespaceList - version bool + glog.Error(v...) } func main() { - options := &options{collectors: make(collectorSet)} - flags := pflag.NewFlagSet("", pflag.ExitOnError) - // add glog flags - flags.AddGoFlagSet(flag.CommandLine) - flags.Lookup("logtostderr").Value.Set("true") - flags.Lookup("logtostderr").DefValue = "true" - flags.Lookup("logtostderr").NoOptDefVal = "true" - flags.StringVar(&options.apiserver, "apiserver", "", `The URL of the apiserver to use as a master`) - flags.StringVar(&options.kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file") - flags.BoolVarP(&options.help, "help", "h", false, "Print help text") - flags.IntVar(&options.port, "port", 80, `Port to expose metrics on.`) - flags.StringVar(&options.host, "host", "0.0.0.0", `Host to expose metrics on.`) - flags.IntVar(&options.telemetryPort, "telemetry-port", 81, `Port to expose kube-state-metrics self metrics on.`) - flags.StringVar(&options.telemetryHost, "telemetry-host", "0.0.0.0", `Host to expose kube-state-metrics self metrics on.`) - flags.Var(&options.collectors, "collectors", fmt.Sprintf("Comma-separated list of collectors to be enabled. Defaults to %q", &defaultCollectors)) - flags.Var(&options.namespaces, "namespace", fmt.Sprintf("Comma-separated list of namespaces to be enabled. 
Defaults to %q", &defaultNamespaces))
-	flags.BoolVarP(&options.version, "version", "", false, "kube-state-metrics build version information")
-
-	flags.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-		flags.PrintDefaults()
-	}
+	opts := options.NewOptions()
+	opts.AddFlags()

-	err := flags.Parse(os.Args)
+	err := opts.Parse()
 	if err != nil {
 		glog.Fatalf("Error: %s", err)
 	}

-	if options.version {
+	if opts.Version {
 		fmt.Printf("%#v\n", version.GetVersion())
 		os.Exit(0)
 	}

-	if options.help {
-		flags.Usage()
+	if opts.Help {
+		opts.Usage()
 		os.Exit(0)
 	}

-	var collectors collectorSet
-	if len(options.collectors) == 0 {
+	collectorBuilder := kcollectors.NewBuilder(context.TODO())
+
+	if len(opts.Collectors) == 0 {
 		glog.Info("Using default collectors")
-		collectors = defaultCollectors
+		collectorBuilder.WithEnabledCollectors(options.DefaultCollectors.AsSlice())
 	} else {
-		collectors = options.collectors
+		glog.Infof("Using collectors %s", opts.Collectors.String())
+		collectorBuilder.WithEnabledCollectors(opts.Collectors.AsSlice())
 	}

-	var namespaces namespaceList
-	if len(options.namespaces) == 0 {
-		namespaces = defaultNamespaces
+	if len(opts.Namespaces) == 0 {
+		glog.Info("Using all namespaces")
+		collectorBuilder.WithNamespaces(options.DefaultNamespaces)
 	} else {
-		namespaces = options.namespaces
+		if opts.Namespaces.IsAllNamespaces() {
+			glog.Info("Using all namespaces")
+		} else {
+			glog.Infof("Using %s namespaces", opts.Namespaces)
+		}
+		collectorBuilder.WithNamespaces(opts.Namespaces)
 	}

-	if namespaces.IsAllNamespaces() {
-		glog.Info("Using all namespace")
-	} else {
-		glog.Infof("Using %s namespaces", namespaces)
+	whiteBlackList, err := whiteblacklist.New(opts.MetricWhitelist, opts.MetricBlacklist)
+	if err != nil {
+		glog.Fatal(err)
+	}
+
+	if opts.DisablePodNonGenericResourceMetrics {
+		whiteBlackList.Exclude([]string{
+			"kube_pod_container_resource_requests_cpu_cores",
+			"kube_pod_container_resource_requests_memory_bytes",
+			"kube_pod_container_resource_limits_cpu_cores",
+			"kube_pod_container_resource_limits_memory_bytes",
+		})
 	}

+	if opts.DisableNodeNonGenericResourceMetrics {
+		whiteBlackList.Exclude([]string{
+			"kube_node_status_capacity_cpu_cores",
+			"kube_node_status_capacity_memory_bytes",
+			"kube_node_status_capacity_pods",
+			"kube_node_status_allocatable_cpu_cores",
+			"kube_node_status_allocatable_memory_bytes",
+			"kube_node_status_allocatable_pods",
+		})
+	}
+
+	glog.Infof("metric white-blacklisting: %v", whiteBlackList.Status())
+
+	collectorBuilder.WithWhiteBlackList(whiteBlackList)
+
 	proc.StartReaper()

-	kubeClient, err := createKubeClient(options.apiserver, options.kubeconfig)
+	kubeClient, err := createKubeClient(opts.Apiserver, opts.Kubeconfig)
 	if err != nil {
 		glog.Fatalf("Failed to create client: %v", err)
 	}
+	collectorBuilder.WithKubeClient(kubeClient)

 	ksmMetricsRegistry := prometheus.NewRegistry()
 	ksmMetricsRegistry.Register(kcollectors.ResourcesPerScrapeMetric)
 	ksmMetricsRegistry.Register(kcollectors.ScrapeErrorTotalMetric)
 	ksmMetricsRegistry.Register(prometheus.NewProcessCollector(os.Getpid(), ""))
 	ksmMetricsRegistry.Register(prometheus.NewGoCollector())

-	go telemetryServer(ksmMetricsRegistry, options.telemetryHost, options.telemetryPort)
+	go telemetryServer(ksmMetricsRegistry, opts.TelemetryHost, opts.TelemetryPort)

-	registry := prometheus.NewRegistry()
-	registerCollectors(registry, kubeClient, collectors, namespaces)
-	metricsServer(registry, options.host, options.port)
+	collectors := collectorBuilder.Build()
+
+	serveMetrics(collectors, opts.Host, opts.Port, opts.EnableGZIPEncoding)
 }

 func createKubeClient(apiserver string, kubeconfig string) (clientset.Interface, error) {
@@ -263,6 +151,10 @@ func createKubeClient(apiserver string, kubeconfig string) (clientset.Interface,
 		return nil, err
 	}

+	config.UserAgent = version.GetVersion().String()
+	config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+	config.ContentType = "application/vnd.kubernetes.protobuf"
+
 	kubeClient, err := clientset.NewForConfig(config)
 	if err != nil {
 		return nil, err
@@ -308,7 +200,8 @@ func telemetryServer(registry prometheus.Gatherer, host string, port int) {
 	log.Fatal(http.ListenAndServe(listenAddress, mux))
 }

-func metricsServer(registry prometheus.Gatherer, host string, port int) {
+// TODO: How about accepting an interface Collector instead?
+func serveMetrics(collectors []*kcollectors.Collector, host string, port int, enableGZIPEncoding bool) {
 	// Address to listen on for web interface and telemetry
 	listenAddress := net.JoinHostPort(host, strconv.Itoa(port))

@@ -316,6 +209,7 @@ func metricsServer(registry prometheus.Gatherer, host string, port int) {

 	mux := http.NewServeMux()

+	// TODO: This doesn't belong in serveMetrics
 	mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
 	mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
 	mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
@@ -323,7 +217,7 @@ func metricsServer(registry prometheus.Gatherer, host string, port int) {
 	mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))

 	// Add metricsPath
-	mux.Handle(metricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorLog: promLogger{}}))
+	mux.Handle(metricsPath, &metricHandler{collectors, enableGZIPEncoding})
 	// Add healthzPath
 	mux.HandleFunc(healthzPath, func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(200)
@@ -345,17 +239,37 @@ func metricsServer(registry prometheus.Gatherer, host string, port int) {
 	log.Fatal(http.ListenAndServe(listenAddress, mux))
 }

-// registerCollectors creates and starts informers and initializes and
-// registers metrics for collection.
-func registerCollectors(registry prometheus.Registerer, kubeClient clientset.Interface, enabledCollectors collectorSet, namespaces namespaceList) {
-	activeCollectors := []string{}
-	for c := range enabledCollectors {
-		f, ok := availableCollectors[c]
-		if ok {
-			f(registry, kubeClient, namespaces)
-			activeCollectors = append(activeCollectors, c)
+type metricHandler struct {
+	collectors         []*kcollectors.Collector
+	enableGZIPEncoding bool
+}
+
+func (m *metricHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	resHeader := w.Header()
+	var writer io.Writer = w
+
+	resHeader.Set("Content-Type", `text/plain; version=`+"0.0.4")
+
+	if m.enableGZIPEncoding {
+		// Gzip response if requested. Taken from
+		// github.com/prometheus/client_golang/prometheus/promhttp.decorateWriter.
+		reqHeader := r.Header.Get("Accept-Encoding")
+		parts := strings.Split(reqHeader, ",")
+		for _, part := range parts {
+			part = strings.TrimSpace(part)
+			if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+				writer = gzip.NewWriter(writer)
+				resHeader.Set("Content-Encoding", "gzip")
+			}
 		}
 	}
-	glog.Infof("Active collectors: %s", strings.Join(activeCollectors, ","))
+
+	// Write through writer, not w, so that a negotiated gzip response
+	// actually carries compressed data.
+	for _, c := range m.collectors {
+		c.Collect(writer)
+	}
+
+	// In case we gzipped the response, we have to close the writer.
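+	// gzip.Writer buffers internally; without this Close the final
+	// compressed frame would never be flushed to the client.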
+ if closer, ok := writer.(io.Closer); ok { + closer.Close() + } } diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000000..258e4dca72 --- /dev/null +++ b/main_test.go @@ -0,0 +1,419 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "context" + "io/ioutil" + "net/http/httptest" + "sort" + "strconv" + "strings" + "testing" + "time" + + kcollectors "k8s.io/kube-state-metrics/pkg/collectors" + "k8s.io/kube-state-metrics/pkg/options" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kube-state-metrics/pkg/whiteblacklist" +) + +func BenchmarkKubeStateMetrics(b *testing.B) { + var collectors []*kcollectors.Collector + fixtureMultiplier := 1000 + requestCount := 1000 + + b.Logf( + "starting kube-state-metrics benchmark with fixtureMultiplier %v and requestCount %v", + fixtureMultiplier, + requestCount, + ) + + kubeClient := fake.NewSimpleClientset() + + if err := injectFixtures(kubeClient, fixtureMultiplier); err != nil { + b.Errorf("error injecting resources: %v", err) + } + + builder := kcollectors.NewBuilder(context.TODO()) + builder.WithEnabledCollectors(options.DefaultCollectors.AsSlice()) + builder.WithKubeClient(kubeClient) + builder.WithNamespaces(options.DefaultNamespaces) + + l, err := whiteblacklist.New(map[string]struct{}{}, map[string]struct{}{}) + if err != nil { + b.Fatal(err) + } + builder.WithWhiteBlackList(l) + + // This test is not suitable to be compared in terms of time, as it includes + // a one second wait. Use for memory allocation comparisons, profiling, ... + b.Run("GenerateMetrics", func(b *testing.B) { + collectors = builder.Build() + + // Wait for caches to fill + time.Sleep(time.Second) + }) + + handler := metricHandler{collectors, false} + req := httptest.NewRequest("GET", "http://localhost:8080/metrics", nil) + + b.Run("MakeRequests", func(b *testing.B) { + var accumulatedContentLength int + + for i := 0; i < requestCount; i++ { + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + resp := w.Result() + if resp.StatusCode != 200 { + b.Fatalf("expected 200 status code but got %v", resp.StatusCode) + } + + b.StopTimer() + buf := bytes.Buffer{} + buf.ReadFrom(resp.Body) + accumulatedContentLength += buf.Len() + b.StartTimer() + } + + b.SetBytes(int64(accumulatedContentLength)) + }) +} + +// TestFullScrapeCycle is a simple smoke test covering the entire cycle from +// cache filling to scraping. 
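+// It asserts on the exact text exposition produced for a single pod fixture,
+// so any change to pod metric names, labels or values surfaces here.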
+func TestFullScrapeCycle(t *testing.T) { + t.Parallel() + + kubeClient := fake.NewSimpleClientset() + + err := pod(kubeClient, 0) + if err != nil { + t.Fatalf("failed to insert sample pod %v", err.Error()) + } + + builder := kcollectors.NewBuilder(context.TODO()) + builder.WithEnabledCollectors(options.DefaultCollectors.AsSlice()) + builder.WithKubeClient(kubeClient) + builder.WithNamespaces(options.DefaultNamespaces) + + l, err := whiteblacklist.New(map[string]struct{}{}, map[string]struct{}{}) + if err != nil { + t.Fatal(err) + } + builder.WithWhiteBlackList(l) + + collectors := builder.Build() + + // Wait for caches to fill + time.Sleep(time.Second) + + handler := metricHandler{collectors, false} + req := httptest.NewRequest("GET", "http://localhost:8080/metrics", nil) + + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + resp := w.Result() + if resp.StatusCode != 200 { + t.Fatalf("expected 200 status code but got %v", resp.StatusCode) + } + + body, _ := ioutil.ReadAll(resp.Body) + + expected := `# HELP kube_pod_info Information about pod. +# TYPE kube_pod_info gauge +kube_pod_info{namespace="default",pod="pod0",host_ip="1.1.1.1",pod_ip="1.2.3.4",uid="abc-123-xxx",node="node1",created_by_kind="",created_by_name=""} 1 +# HELP kube_pod_start_time Start time in unix timestamp for a pod. +# TYPE kube_pod_start_time gauge +# HELP kube_pod_completion_time Completion time in unix timestamp for a pod. +# TYPE kube_pod_completion_time gauge +# HELP kube_pod_owner Information about the Pod's owner. +# TYPE kube_pod_owner gauge +kube_pod_owner{namespace="default",pod="pod0",owner_kind="",owner_name="",owner_is_controller=""} 1 +# HELP kube_pod_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_pod_labels gauge +kube_pod_labels{namespace="default",pod="pod0"} 1 +# HELP kube_pod_created Unix creation timestamp +# TYPE kube_pod_created gauge +kube_pod_created{namespace="default",pod="pod0"} 1.5e+09 +# HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status +# TYPE kube_pod_status_scheduled_time gauge +# HELP kube_pod_status_phase The pods current phase. +# TYPE kube_pod_status_phase gauge +kube_pod_status_phase{namespace="default",pod="pod0",phase="Pending"} 0 +kube_pod_status_phase{namespace="default",pod="pod0",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default",pod="pod0",phase="Failed"} 0 +kube_pod_status_phase{namespace="default",pod="pod0",phase="Running"} 1 +kube_pod_status_phase{namespace="default",pod="pod0",phase="Unknown"} 0 +# HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. +# TYPE kube_pod_status_ready gauge +# HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. +# TYPE kube_pod_status_scheduled gauge +# HELP kube_pod_container_info Information about a container in a pod. +# TYPE kube_pod_container_info gauge +kube_pod_container_info{namespace="default",pod="pod0",container="container2",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",container_id="docker://cd456"} 1 +kube_pod_container_info{namespace="default",pod="pod0",container="container3",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",container_id="docker://ef789"} 1 +# HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state. 
+# TYPE kube_pod_container_status_waiting gauge +kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container2"} 1 +kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container3"} 0 +# HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state. +# TYPE kube_pod_container_status_waiting_reason gauge +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CrashLoopBackOff"} 1 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ImagePullBackOff"} 0 +# HELP kube_pod_container_status_running Describes whether the container is currently in running state. +# TYPE kube_pod_container_status_running gauge +kube_pod_container_status_running{namespace="default",pod="pod0",container="container2"} 0 +kube_pod_container_status_running{namespace="default",pod="pod0",container="container3"} 0 +# HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state. +# TYPE kube_pod_container_status_terminated gauge +kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container2"} 0 +kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container3"} 0 +# HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state. 
+# TYPE kube_pod_container_status_terminated_reason gauge +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCannotRun"} 0 +# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state. +# TYPE kube_pod_container_status_last_terminated_reason gauge +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 1 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCannotRun"} 0 +# HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded. +# TYPE kube_pod_container_status_ready gauge +kube_pod_container_status_ready{namespace="default",pod="pod0",container="container2"} 0 +kube_pod_container_status_ready{namespace="default",pod="pod0",container="container3"} 0 +# HELP kube_pod_container_status_restarts_total The number of container restarts per container. +# TYPE kube_pod_container_status_restarts_total counter +kube_pod_container_status_restarts_total{namespace="default",pod="pod0",container="container2"} 0 +kube_pod_container_status_restarts_total{namespace="default",pod="pod0",container="container3"} 0 +# HELP kube_pod_container_resource_requests The number of requested request resource by a container. 
+# TYPE kube_pod_container_resource_requests gauge +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="nvidia_com_gpu",unit="integer"} 1 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="cpu",unit="core"} 0.2 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="memory",unit="byte"} 1e+08 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="ephemeral_storage",unit="byte"} 3e+08 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="storage",unit="byte"} 4e+08 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="cpu",unit="core"} 0.3 +kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="memory",unit="byte"} 2e+08 +# HELP kube_pod_container_resource_limits The number of requested limit resource by a container. +# TYPE kube_pod_container_resource_limits gauge +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="nvidia_com_gpu",unit="integer"} 1 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="cpu",unit="core"} 0.2 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="memory",unit="byte"} 1e+08 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="ephemeral_storage",unit="byte"} 3e+08 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="storage",unit="byte"} 4e+08 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="memory",unit="byte"} 2e+08 +kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="cpu",unit="core"} 0.3 +# HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container. +# TYPE kube_pod_container_resource_requests_cpu_cores gauge +kube_pod_container_resource_requests_cpu_cores{namespace="default",pod="pod0",container="pod1_con1",node="node1"} 0.2 +kube_pod_container_resource_requests_cpu_cores{namespace="default",pod="pod0",container="pod1_con2",node="node1"} 0.3 +# HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container. +# TYPE kube_pod_container_resource_requests_memory_bytes gauge +kube_pod_container_resource_requests_memory_bytes{namespace="default",pod="pod0",container="pod1_con1",node="node1"} 1e+08 +kube_pod_container_resource_requests_memory_bytes{namespace="default",pod="pod0",container="pod1_con2",node="node1"} 2e+08 +# HELP kube_pod_container_resource_limits_cpu_cores The limit on cpu cores to be used by a container. +# TYPE kube_pod_container_resource_limits_cpu_cores gauge +kube_pod_container_resource_limits_cpu_cores{namespace="default",pod="pod0",container="pod1_con1",node="node1"} 0.2 +kube_pod_container_resource_limits_cpu_cores{namespace="default",pod="pod0",container="pod1_con2",node="node1"} 0.3 +# HELP kube_pod_container_resource_limits_memory_bytes The limit on memory to be used by a container in bytes. 
+# TYPE kube_pod_container_resource_limits_memory_bytes gauge +kube_pod_container_resource_limits_memory_bytes{namespace="default",pod="pod0",container="pod1_con1",node="node1"} 1e+08 +kube_pod_container_resource_limits_memory_bytes{namespace="default",pod="pod0",container="pod1_con2",node="node1"} 2e+08 +# HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod. +# TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge +# HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only. +# TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge` + + expectedSplit := strings.Split(strings.TrimSpace(expected), "\n") + sort.Strings(expectedSplit) + + gotSplit := strings.Split(strings.TrimSpace(string(body)), "\n") + + gotFiltered := []string{} + for _, l := range gotSplit { + if strings.Contains(l, "kube_pod_") { + gotFiltered = append(gotFiltered, l) + } + } + + sort.Strings(gotFiltered) + + if len(expectedSplit) != len(gotFiltered) { + t.Fatal("expected different output length") + } + + for i := 0; i < len(expectedSplit); i++ { + if expectedSplit[i] != gotFiltered[i] { + t.Fatalf("expected %v, but got %v", expectedSplit[i], gotFiltered[i]) + } + } +} + +func injectFixtures(client *fake.Clientset, multiplier int) error { + creators := []func(*fake.Clientset, int) error{ + configMap, + service, + pod, + } + + for _, c := range creators { + for i := 0; i < multiplier; i++ { + err := c(client, i) + + if err != nil { + return err + } + } + } + + return nil +} + +func configMap(client *fake.Clientset, index int) error { + i := strconv.Itoa(index) + + configMap := v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap" + i, + ResourceVersion: "123456", + }, + } + _, err := client.CoreV1().ConfigMaps(metav1.NamespaceDefault).Create(&configMap) + return err +} + +func service(client *fake.Clientset, index int) error { + i := strconv.Itoa(index) + + service := v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service" + i, + ResourceVersion: "123456", + }, + } + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(&service) + return err +} + +func pod(client *fake.Clientset, index int) error { + i := strconv.Itoa(index) + + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod" + i, + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + UID: "abc-123-xxx", + }, + Spec: v1.PodSpec{ + NodeName: "node1", + Containers: []v1.Container{ + v1.Container{ + Name: "pod1_con1", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceEphemeralStorage: resource.MustParse("300M"), + v1.ResourceStorage: resource.MustParse("400M"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceEphemeralStorage: resource.MustParse("300M"), + v1.ResourceStorage: resource.MustParse("400M"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), + }, + }, + }, + v1.Container{ + Name: "pod1_con2", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("300m"), + v1.ResourceMemory: resource.MustParse("200M"), + }, + Limits: 
map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("300m"), + v1.ResourceMemory: resource.MustParse("200M"), + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + HostIP: "1.1.1.1", + PodIP: "1.2.3.4", + Phase: v1.PodRunning, + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2", + Image: "k8s.gcr.io/hyperkube2", + ImageID: "docker://sha256:bbb", + ContainerID: "docker://cd456", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CrashLoopBackOff", + }, + }, + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "OOMKilled", + }, + }, + }, + v1.ContainerStatus{ + Name: "container3", + Image: "k8s.gcr.io/hyperkube3", + ImageID: "docker://sha256:ccc", + ContainerID: "docker://ef789", + }, + }, + }, + } + + _, err := client.CoreV1().Pods(metav1.NamespaceDefault).Create(&pod) + return err +} diff --git a/pkg/collectors/builder.go b/pkg/collectors/builder.go new file mode 100644 index 0000000000..297761bdbe --- /dev/null +++ b/pkg/collectors/builder.go @@ -0,0 +1,511 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: rename collector +package collectors + +import ( + "sort" + "strings" + + "k8s.io/kube-state-metrics/pkg/metrics" + metricsstore "k8s.io/kube-state-metrics/pkg/metrics_store" + "k8s.io/kube-state-metrics/pkg/options" + + apps "k8s.io/api/apps/v1beta1" + autoscaling "k8s.io/api/autoscaling/v2beta1" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + policy "k8s.io/api/policy/v1beta1" + + "github.com/golang/glog" + "golang.org/x/net/context" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +type whiteBlackLister interface { + IsIncluded(string) bool + IsExcluded(string) bool +} + +// Builder helps to build collectors. It follows the builder pattern +// (https://en.wikipedia.org/wiki/Builder_pattern). +type Builder struct { + kubeClient clientset.Interface + namespaces options.NamespaceList + ctx context.Context + enabledCollectors []string + whiteBlackList whiteBlackLister +} + +// NewBuilder returns a new builder. +func NewBuilder( + ctx context.Context, +) *Builder { + return &Builder{ + ctx: ctx, + } +} + +// WithEnabledCollectors sets the enabledCollectors property of a Builder. +func (b *Builder) WithEnabledCollectors(c []string) { + copy := []string{} + for _, s := range c { + copy = append(copy, s) + } + + sort.Strings(copy) + + b.enabledCollectors = copy +} + +// WithNamespaces sets the namespaces property of a Builder. +func (b *Builder) WithNamespaces(n options.NamespaceList) { + b.namespaces = n +} + +// WithKubeClient sets the kubeClient property of a Builder. 
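+// The client is shared by every reflector the Builder starts in Build.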
+func (b *Builder) WithKubeClient(c clientset.Interface) {
+	b.kubeClient = c
+}
+
+// WithWhiteBlackList configures the white or blacklisted metrics to be exposed
+// by the collectors built by the Builder.
+func (b *Builder) WithWhiteBlackList(l whiteBlackLister) {
+	b.whiteBlackList = l
+}
+
+// Build initializes and registers all enabled collectors.
+func (b *Builder) Build() []*Collector {
+	if b.whiteBlackList == nil {
+		panic("whiteBlackList should not be nil")
+	}
+
+	collectors := []*Collector{}
+	activeCollectorNames := []string{}
+
+	for _, c := range b.enabledCollectors {
+		constructor, ok := availableCollectors[c]
+		if ok {
+			collector := constructor(b)
+			activeCollectorNames = append(activeCollectorNames, c)
+			collectors = append(collectors, collector)
+		}
+	}
+
+	glog.Infof("Active collectors: %s", strings.Join(activeCollectorNames, ","))
+
+	return collectors
+}
+
+var availableCollectors = map[string]func(f *Builder) *Collector{
+	"configmaps":               func(b *Builder) *Collector { return b.buildConfigMapCollector() },
+	"cronjobs":                 func(b *Builder) *Collector { return b.buildCronJobCollector() },
+	"daemonsets":               func(b *Builder) *Collector { return b.buildDaemonSetCollector() },
+	"deployments":              func(b *Builder) *Collector { return b.buildDeploymentCollector() },
+	"endpoints":                func(b *Builder) *Collector { return b.buildEndpointsCollector() },
+	"horizontalpodautoscalers": func(b *Builder) *Collector { return b.buildHPACollector() },
+	"jobs":                     func(b *Builder) *Collector { return b.buildJobCollector() },
+	"limitranges":              func(b *Builder) *Collector { return b.buildLimitRangeCollector() },
+	"namespaces":               func(b *Builder) *Collector { return b.buildNamespaceCollector() },
+	"nodes":                    func(b *Builder) *Collector { return b.buildNodeCollector() },
+	"persistentvolumeclaims":   func(b *Builder) *Collector { return b.buildPersistentVolumeClaimCollector() },
+	"persistentvolumes":        func(b *Builder) *Collector { return b.buildPersistentVolumeCollector() },
+	"poddisruptionbudgets":     func(b *Builder) *Collector { return b.buildPodDisruptionBudgetCollector() },
+	"pods":                     func(b *Builder) *Collector { return b.buildPodCollector() },
+	"replicasets":              func(b *Builder) *Collector { return b.buildReplicaSetCollector() },
+	"replicationcontrollers":   func(b *Builder) *Collector { return b.buildReplicationControllerCollector() },
+	"resourcequotas":           func(b *Builder) *Collector { return b.buildResourceQuotaCollector() },
+	"secrets":                  func(b *Builder) *Collector { return b.buildSecretCollector() },
+	"services":                 func(b *Builder) *Collector { return b.buildServiceCollector() },
+	"statefulsets":             func(b *Builder) *Collector { return b.buildStatefulSetCollector() },
+}
+
+func (b *Builder) buildConfigMapCollector() *Collector {
+	filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, configMapMetricFamilies)
+	composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies)
+
+	familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies)
+
+	store := metricsstore.NewMetricsStore(
+		familyHeaders,
+		composedMetricGenFuncs,
+	)
+	reflectorPerNamespace(b.ctx, b.kubeClient, &v1.ConfigMap{}, store, b.namespaces, createConfigMapListWatch)
+
+	return NewCollector(store)
+}
+
+func (b *Builder) buildCronJobCollector() *Collector {
+	filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, cronJobMetricFamilies)
+	composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies)
+
+	familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies)
+
+	store :=
metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &batchv1beta1.CronJob{}, store, b.namespaces, createCronJobListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildDaemonSetCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, daemonSetMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &extensions.DaemonSet{}, store, b.namespaces, createDaemonSetListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildDeploymentCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, deploymentMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &extensions.Deployment{}, store, b.namespaces, createDeploymentListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildEndpointsCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, endpointMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Endpoints{}, store, b.namespaces, createEndpointsListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildHPACollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, hpaMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &autoscaling.HorizontalPodAutoscaler{}, store, b.namespaces, createHPAListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildJobCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, jobMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &batchv1.Job{}, store, b.namespaces, createJobListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildLimitRangeCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, limitRangeMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.LimitRange{}, store, b.namespaces, createLimitRangeListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildNamespaceCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, 
namespaceMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Namespace{}, store, b.namespaces, createNamespaceListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildNodeCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, nodeMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Node{}, store, b.namespaces, createNodeListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildPersistentVolumeClaimCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, persistentVolumeClaimMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.PersistentVolumeClaim{}, store, b.namespaces, createPersistentVolumeClaimListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildPersistentVolumeCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, persistentVolumeMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.PersistentVolume{}, store, b.namespaces, createPersistentVolumeListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildPodDisruptionBudgetCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, podDisruptionBudgetMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &policy.PodDisruptionBudget{}, store, b.namespaces, createPodDisruptionBudgetListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildReplicaSetCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, replicaSetMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &extensions.ReplicaSet{}, store, b.namespaces, createReplicaSetListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildReplicationControllerCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, replicationControllerMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + 
composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.ReplicationController{}, store, b.namespaces, createReplicationControllerListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildResourceQuotaCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, resourceQuotaMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.ResourceQuota{}, store, b.namespaces, createResourceQuotaListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildSecretCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, secretMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Secret{}, store, b.namespaces, createSecretListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildServiceCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, serviceMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Service{}, store, b.namespaces, createServiceListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildStatefulSetCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, statefulSetMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &apps.StatefulSet{}, store, b.namespaces, createStatefulSetListWatch) + + return NewCollector(store) +} + +func (b *Builder) buildPodCollector() *Collector { + filteredMetricFamilies := filterMetricFamilies(b.whiteBlackList, podMetricFamilies) + composedMetricGenFuncs := composeMetricGenFuncs(filteredMetricFamilies) + + familyHeaders := extractMetricFamilyHeaders(filteredMetricFamilies) + + store := metricsstore.NewMetricsStore( + familyHeaders, + composedMetricGenFuncs, + ) + reflectorPerNamespace(b.ctx, b.kubeClient, &v1.Pod{}, store, b.namespaces, createPodListWatch) + + return NewCollector(store) +} + +func extractMetricFamilyHeaders(families []metrics.FamilyGenerator) []string { + headers := make([]string, len(families)) + + for i, f := range families { + header := strings.Builder{} + + header.WriteString("# HELP ") + header.WriteString(f.Name) + header.WriteByte(' ') + header.WriteString(f.Help) + header.WriteByte('\n') + header.WriteString("# TYPE ") + header.WriteString(f.Name) + header.WriteByte(' ') + header.WriteString(string(f.Type)) + + headers[i] = header.String() + } + + return headers +} + +// composeMetricGenFuncs takes a slice of metric families and returns a function +// that composes their metric generation functions into a single one. 
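+//
+// For illustration, with two hypothetical generators fooGen and barGen:
+//
+//	gen := composeMetricGenFuncs([]metrics.FamilyGenerator{fooGen, barGen})
+//	families := gen(obj) // one metricsstore.FamilyStringer per generator, in input order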
+func composeMetricGenFuncs(families []metrics.FamilyGenerator) func(obj interface{}) []metricsstore.FamilyStringer { + funcs := []func(obj interface{}) metrics.Family{} + + for _, f := range families { + funcs = append(funcs, f.GenerateFunc) + } + + return func(obj interface{}) []metricsstore.FamilyStringer { + families := make([]metricsstore.FamilyStringer, len(funcs)) + + for i, f := range funcs { + families[i] = f(obj) + } + + return families + } +} + +// filterMetricFamilies takes a white- and a blacklist and a slice of metric +// families and returns a filtered slice. +func filterMetricFamilies(l whiteBlackLister, families []metrics.FamilyGenerator) []metrics.FamilyGenerator { + filtered := []metrics.FamilyGenerator{} + + for _, f := range families { + if l.IsIncluded(f.Name) { + filtered = append(filtered, f) + } + } + + return filtered +} + +// reflectorPerNamespace creates a Kubernetes client-go reflector with the given +// listWatchFunc for each given namespace and registers it with the given store. +func reflectorPerNamespace( + ctx context.Context, + kubeClient clientset.Interface, + expectedType interface{}, + store cache.Store, + namespaces []string, + listWatchFunc func(kubeClient clientset.Interface, ns string) cache.ListWatch, +) { + for _, ns := range namespaces { + lw := listWatchFunc(kubeClient, ns) + reflector := cache.NewReflector(&lw, expectedType, store, 0) + go reflector.Run(ctx.Done()) + } +} diff --git a/pkg/collectors/collectors.go b/pkg/collectors/collectors.go new file mode 100644 index 0000000000..4a7a9560bd --- /dev/null +++ b/pkg/collectors/collectors.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "io" +) + +// Store represents a metrics store e.g. +// k8s.io/kube-state-metrics/pkg/metrics_store. +type Store interface { + WriteAll(io.Writer) +} + +// Collector represents a kube-state-metrics metric collector. It is a stripped +// down version of the Prometheus client_golang collector. +type Collector struct { + Store Store +} + +// NewCollector constructs a collector with the given Store. +func NewCollector(s Store) *Collector { + return &Collector{s} +} + +// Collect returns all metrics of the underlying store of the collector. +func (c *Collector) Collect(w io.Writer) { + c.Store.WriteAll(w) +} diff --git a/pkg/collectors/configmap.go b/pkg/collectors/configmap.go new file mode 100644 index 0000000000..a8c62a0a53 --- /dev/null +++ b/pkg/collectors/configmap.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descConfigMapLabelsDefaultLabels = []string{"namespace", "configmap"} + + configMapMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_configmap_info", + Type: metrics.MetricTypeGauge, + Help: "Information about configmap.", + GenerateFunc: wrapConfigMapFunc(func(c *v1.ConfigMap) metrics.Family { + return metrics.Family{ + &metrics.Metric{ + Name: "kube_configmap_info", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: 1, + }, + } + }), + }, + metrics.FamilyGenerator{ + Name: "kube_configmap_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapConfigMapFunc(func(c *v1.ConfigMap) metrics.Family { + f := metrics.Family{} + + if !c.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_configmap_created", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(c.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_configmap_metadata_resource_version", + Type: metrics.MetricTypeGauge, + Help: "Resource version representing a specific version of the configmap.", + GenerateFunc: wrapConfigMapFunc(func(c *v1.ConfigMap) metrics.Family { + return metrics.Family{ + &metrics.Metric{ + Name: "kube_configmap_metadata_resource_version", + LabelKeys: []string{"resource_version"}, + LabelValues: []string{string(c.ObjectMeta.ResourceVersion)}, + Value: 1, + }, + } + }), + }, + } +) + +func createConfigMapListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().ConfigMaps(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().ConfigMaps(ns).Watch(opts) + }, + } +} + +func wrapConfigMapFunc(f func(*v1.ConfigMap) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + configMap := obj.(*v1.ConfigMap) + + metricFamily := f(configMap) + + for _, m := range metricFamily { + m.LabelKeys = append(descConfigMapLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{configMap.Namespace, configMap.Name}, m.LabelValues...) + } + + return metricFamily + } +} diff --git a/pkg/collectors/configmap_test.go b/pkg/collectors/configmap_test.go new file mode 100644 index 0000000000..8e521f22cf --- /dev/null +++ b/pkg/collectors/configmap_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestConfigMapCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + + startTime := 1501569018 + metav1StartTime := metav1.Unix(int64(startTime), 0) + + const metadata = ` + # HELP kube_configmap_info Information about configmap. + # TYPE kube_configmap_info gauge + # HELP kube_configmap_created Unix creation timestamp + # TYPE kube_configmap_created gauge + # HELP kube_configmap_metadata_resource_version Resource version representing a specific version of the configmap. + # TYPE kube_configmap_metadata_resource_version gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap1", + Namespace: "ns1", + ResourceVersion: "123456", + }, + }, + Want: ` + kube_configmap_info{configmap="configmap1",namespace="ns1"} 1 + kube_configmap_metadata_resource_version{configmap="configmap1",namespace="ns1",resource_version="123456"} 1 +`, + MetricNames: []string{"kube_configmap_info", "kube_configmap_metadata_resource_version"}, + }, + { + Obj: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "configmap2", + Namespace: "ns2", + CreationTimestamp: metav1StartTime, + ResourceVersion: "abcdef", + }, + }, + Want: ` + kube_configmap_info{configmap="configmap2",namespace="ns2"} 1 + kube_configmap_created{configmap="configmap2",namespace="ns2"} 1.501569018e+09 + kube_configmap_metadata_resource_version{configmap="configmap2",namespace="ns2",resource_version="abcdef"} 1 + `, + MetricNames: []string{"kube_configmap_info", "kube_configmap_created", "kube_configmap_metadata_resource_version"}, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(configMapMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/cronjob.go b/pkg/collectors/cronjob.go new file mode 100644 index 0000000000..a216c2eae3 --- /dev/null +++ b/pkg/collectors/cronjob.go @@ -0,0 +1,227 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "fmt" + "time" + + "k8s.io/kube-state-metrics/pkg/metrics" + + batchv1beta1 "k8s.io/api/batch/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + "github.com/robfig/cron" +) + +var ( + descCronJobLabelsName = "kube_cronjob_labels" + descCronJobLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descCronJobLabelsDefaultLabels = []string{"namespace", "cronjob"} + + cronJobMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: descCronJobLabelsName, + Type: metrics.MetricTypeGauge, + Help: descCronJobLabelsHelp, + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(j.Labels) + return metrics.Family{ + &metrics.Metric{ + Name: descCronJobLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }, + } + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_info", + Type: metrics.MetricTypeGauge, + Help: "Info about cronjob.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + return metrics.Family{ + &metrics.Metric{ + Name: "kube_cronjob_info", + LabelKeys: []string{"schedule", "concurrency_policy"}, + LabelValues: []string{j.Spec.Schedule, string(j.Spec.ConcurrencyPolicy)}, + Value: 1, + }, + } + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + f := metrics.Family{} + if !j.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_cronjob_created", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(j.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_status_active", + Type: metrics.MetricTypeGauge, + Help: "Active holds pointers to currently running jobs.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + return metrics.Family{ + &metrics.Metric{ + Name: "kube_cronjob_status_active", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(len(j.Status.Active)), + }, + } + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_status_last_schedule_time", + Type: metrics.MetricTypeGauge, + Help: "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + f := metrics.Family{} + + if j.Status.LastScheduleTime != nil { + f = append(f, &metrics.Metric{ + Name: "kube_cronjob_status_last_schedule_time", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(j.Status.LastScheduleTime.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_spec_suspend", + Type: metrics.MetricTypeGauge, + Help: "Suspend flag tells the controller to suspend subsequent executions.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + f := metrics.Family{} + + if j.Spec.Suspend != nil { + f = append(f, &metrics.Metric{ + Name: "kube_cronjob_spec_suspend", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: boolFloat64(*j.Spec.Suspend), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_spec_starting_deadline_seconds", + 
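+ // Optional in the CronJob spec; the generator below only emits a sample
+ // when spec.startingDeadlineSeconds is set.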
Type: metrics.MetricTypeGauge, + Help: "Deadline in seconds for starting the job if it misses scheduled time for any reason.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + f := metrics.Family{} + + if j.Spec.StartingDeadlineSeconds != nil { + f = append(f, &metrics.Metric{ + Name: "kube_cronjob_spec_starting_deadline_seconds", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(*j.Spec.StartingDeadlineSeconds), + }) + + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_cronjob_next_schedule_time", + Type: metrics.MetricTypeGauge, + Help: "Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed.", + GenerateFunc: wrapCronJobFunc(func(j *batchv1beta1.CronJob) metrics.Family { + f := metrics.Family{} + + // If the cron job is suspended, don't track the next scheduled time + nextScheduledTime, err := getNextScheduledTime(j.Spec.Schedule, j.Status.LastScheduleTime, j.CreationTimestamp) + if err != nil { + panic(err) + } else if !*j.Spec.Suspend { + f = append(f, &metrics.Metric{ + Name: "kube_cronjob_next_schedule_time", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(nextScheduledTime.Unix()), + }) + } + + return f + }), + }, + } +) + +func wrapCronJobFunc(f func(*batchv1beta1.CronJob) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + cronJob := obj.(*batchv1beta1.CronJob) + + metricFamily := f(cronJob) + + for _, m := range metricFamily { + m.LabelKeys = append(descCronJobLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{cronJob.Namespace, cronJob.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createCronJobListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.BatchV1beta1().CronJobs(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.BatchV1beta1().CronJobs(ns).Watch(opts) + }, + } +} + +func getNextScheduledTime(schedule string, lastScheduleTime *metav1.Time, createdTime metav1.Time) (time.Time, error) { + sched, err := cron.ParseStandard(schedule) + if err != nil { + return time.Time{}, fmt.Errorf("Failed to parse cron job schedule '%s': %s", schedule, err) + } + if !lastScheduleTime.IsZero() { + return sched.Next((*lastScheduleTime).Time), nil + } + if !createdTime.IsZero() { + return sched.Next(createdTime.Time), nil + } + return time.Time{}, fmt.Errorf("Created time and lastScheduleTime are both zero") +} diff --git a/pkg/collectors/cronjob_test.go b/pkg/collectors/cronjob_test.go new file mode 100644 index 0000000000..13889c1dd7 --- /dev/null +++ b/pkg/collectors/cronjob_test.go @@ -0,0 +1,227 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package collectors
+
+import (
+ "fmt"
+ "math"
+ "testing"
+ "time"
+
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var (
+ SuspendTrue = true
+ SuspendFalse = false
+ StartingDeadlineSeconds300 int64 = 300
+
+ // "1520742896" is "2018/3/11 12:34:56" in "Asia/Shanghai".
+ ActiveRunningCronJob1LastScheduleTime = time.Unix(1520742896, 0)
+ SuspendedCronJob1LastScheduleTime = time.Unix(1520742896+5.5*3600, 0) // 5.5 hours later
+ ActiveCronJob1NoLastScheduledCreationTimestamp = time.Unix(1520742896+6.5*3600, 0)
+)
+
+func TestCronJobCollector(t *testing.T) {
+ // Fixed metadata on type and help text. We prepend this to every expected
+ // output so we only have to modify a single place when doing adjustments.
+
+ hour := ActiveRunningCronJob1LastScheduleTime.Hour()
+ ActiveRunningCronJob1NextScheduleTime := time.Time{}
+ switch {
+ case hour < 6:
+ ActiveRunningCronJob1NextScheduleTime = time.Date(
+ ActiveRunningCronJob1LastScheduleTime.Year(),
+ ActiveRunningCronJob1LastScheduleTime.Month(),
+ ActiveRunningCronJob1LastScheduleTime.Day(),
+ 6,
+ 0,
+ 0, 0, time.Local)
+ case hour < 12:
+ ActiveRunningCronJob1NextScheduleTime = time.Date(
+ ActiveRunningCronJob1LastScheduleTime.Year(),
+ ActiveRunningCronJob1LastScheduleTime.Month(),
+ ActiveRunningCronJob1LastScheduleTime.Day(),
+ 12,
+ 0,
+ 0, 0, time.Local)
+ case hour < 18:
+ ActiveRunningCronJob1NextScheduleTime = time.Date(
+ ActiveRunningCronJob1LastScheduleTime.Year(),
+ ActiveRunningCronJob1LastScheduleTime.Month(),
+ ActiveRunningCronJob1LastScheduleTime.Day(),
+ 18,
+ 0,
+ 0, 0, time.Local)
+ case hour < 24:
+ ActiveRunningCronJob1NextScheduleTime = time.Date(
+ ActiveRunningCronJob1LastScheduleTime.Year(),
+ ActiveRunningCronJob1LastScheduleTime.Month(),
+ ActiveRunningCronJob1LastScheduleTime.Day(),
+ 24,
+ 0,
+ 0, 0, time.Local)
+ }
+
+ minute := ActiveCronJob1NoLastScheduledCreationTimestamp.Minute()
+ ActiveCronJob1NoLastScheduledNextScheduleTime := time.Time{}
+ switch {
+ case minute < 25:
+ ActiveCronJob1NoLastScheduledNextScheduleTime = time.Date(
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Year(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Month(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Day(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Hour(),
+ 25,
+ 0, 0, time.Local)
+ default:
+ // Roll over to minute 25 of the following hour, computed from the
+ // creation timestamp (the variable being assigned is still its zero
+ // value at this point).
+ ActiveCronJob1NoLastScheduledNextScheduleTime = time.Date(
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Year(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Month(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Day(),
+ ActiveCronJob1NoLastScheduledCreationTimestamp.Hour()+1,
+ 25,
+ 0, 0, time.Local)
+ }
+
+ const metadata = `
+ # HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels.
+ # TYPE kube_cronjob_labels gauge
+ # HELP kube_cronjob_info Info about cronjob.
+ # TYPE kube_cronjob_info gauge
+ # HELP kube_cronjob_created Unix creation timestamp
+ # TYPE kube_cronjob_created gauge
+ # HELP kube_cronjob_spec_starting_deadline_seconds Deadline in seconds for starting the job if it misses scheduled time for any reason.
+ # TYPE kube_cronjob_spec_starting_deadline_seconds gauge
+ # HELP kube_cronjob_spec_suspend Suspend flag tells the controller to suspend subsequent executions.
+ # TYPE kube_cronjob_spec_suspend gauge
+ # HELP kube_cronjob_status_active Active holds pointers to currently running jobs.
+ # TYPE kube_cronjob_status_active gauge + # HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + # TYPE kube_cronjob_status_last_schedule_time gauge + # HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed. + # TYPE kube_cronjob_next_schedule_time gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ActiveRunningCronJob1", + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-active-running-1", + }, + }, + Status: batchv1beta1.CronJobStatus{ + Active: []v1.ObjectReference{{Name: "FakeJob1"}, {Name: "FakeJob2"}}, + LastScheduleTime: &metav1.Time{Time: ActiveRunningCronJob1LastScheduleTime}, + }, + Spec: batchv1beta1.CronJobSpec{ + StartingDeadlineSeconds: &StartingDeadlineSeconds300, + ConcurrencyPolicy: "Forbid", + Suspend: &SuspendFalse, + Schedule: "0 */6 * * *", + }, + }, + Want: ` + kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveRunningCronJob1",namespace="ns1",schedule="0 */6 * * *"} 1 + kube_cronjob_labels{cronjob="ActiveRunningCronJob1",label_app="example-active-running-1",namespace="ns1"} 1 + kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveRunningCronJob1",namespace="ns1"} 300 + kube_cronjob_spec_suspend{cronjob="ActiveRunningCronJob1",namespace="ns1"} 0 + kube_cronjob_status_active{cronjob="ActiveRunningCronJob1",namespace="ns1"} 2 + kube_cronjob_status_last_schedule_time{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1.520742896e+09 +` + fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveRunningCronJob1\",namespace=\"ns1\"} %ve+09\n", + float64(ActiveRunningCronJob1NextScheduleTime.Unix())/math.Pow10(9)), + MetricNames: []string{"kube_cronjob_next_schedule_time", "kube_cronjob_spec_starting_deadline_seconds", "kube_cronjob_status_active", "kube_cronjob_spec_suspend", "kube_cronjob_info", "kube_cronjob_created", "kube_cronjob_labels", "kube_cronjob_status_last_schedule_time"}, + }, + { + Obj: &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "SuspendedCronJob1", + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-suspended-1", + }, + }, + Status: batchv1beta1.CronJobStatus{ + Active: []v1.ObjectReference{}, + LastScheduleTime: &metav1.Time{Time: SuspendedCronJob1LastScheduleTime}, + }, + Spec: batchv1beta1.CronJobSpec{ + StartingDeadlineSeconds: &StartingDeadlineSeconds300, + ConcurrencyPolicy: "Forbid", + Suspend: &SuspendTrue, + Schedule: "0 */3 * * *", + }, + }, + Want: ` + kube_cronjob_info{concurrency_policy="Forbid",cronjob="SuspendedCronJob1",namespace="ns1",schedule="0 */3 * * *"} 1 + kube_cronjob_labels{cronjob="SuspendedCronJob1",label_app="example-suspended-1",namespace="ns1"} 1 + kube_cronjob_spec_starting_deadline_seconds{cronjob="SuspendedCronJob1",namespace="ns1"} 300 + kube_cronjob_spec_suspend{cronjob="SuspendedCronJob1",namespace="ns1"} 1 + kube_cronjob_status_active{cronjob="SuspendedCronJob1",namespace="ns1"} 0 + kube_cronjob_status_last_schedule_time{cronjob="SuspendedCronJob1",namespace="ns1"} 1.520762696e+09 +`, + MetricNames: []string{"kube_cronjob_spec_starting_deadline_seconds", "kube_cronjob_status_active", "kube_cronjob_spec_suspend", "kube_cronjob_info", "kube_cronjob_created", "kube_cronjob_labels", 
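+ // kube_cronjob_next_schedule_time is intentionally absent here:
+ // suspended cron jobs do not emit that metric.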
"kube_cronjob_status_last_schedule_time"}, + }, + { + Obj: &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ActiveCronJob1NoLastScheduled", + CreationTimestamp: metav1.Time{Time: ActiveCronJob1NoLastScheduledCreationTimestamp}, + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-active-no-last-scheduled-1", + }, + }, + Status: batchv1beta1.CronJobStatus{ + Active: []v1.ObjectReference{}, + LastScheduleTime: nil, + }, + Spec: batchv1beta1.CronJobSpec{ + StartingDeadlineSeconds: &StartingDeadlineSeconds300, + ConcurrencyPolicy: "Forbid", + Suspend: &SuspendFalse, + Schedule: "25 * * * *", + }, + }, + Want: ` + kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 300 + kube_cronjob_status_active{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 0 + kube_cronjob_spec_suspend{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 0 + kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1",schedule="25 * * * *"} 1 + kube_cronjob_created{cronjob="ActiveCronJob1NoLastScheduled",namespace="ns1"} 1.520766296e+09 + kube_cronjob_labels{cronjob="ActiveCronJob1NoLastScheduled",label_app="example-active-no-last-scheduled-1",namespace="ns1"} 1 +` + + fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveCronJob1NoLastScheduled\",namespace=\"ns1\"} %ve+09\n", + float64(ActiveCronJob1NoLastScheduledNextScheduleTime.Unix())/math.Pow10(9)), + // TODO: Do we need to specify metricnames? + MetricNames: []string{"kube_cronjob_next_schedule_time", "kube_cronjob_spec_starting_deadline_seconds", "kube_cronjob_status_active", "kube_cronjob_spec_suspend", "kube_cronjob_info", "kube_cronjob_created", "kube_cronjob_labels"}, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(cronJobMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/daemonset.go b/pkg/collectors/daemonset.go new file mode 100644 index 0000000000..60e1c092c7 --- /dev/null +++ b/pkg/collectors/daemonset.go @@ -0,0 +1,200 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descDaemonSetLabelsName = "kube_daemonset_labels" + descDaemonSetLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
+ descDaemonSetLabelsDefaultLabels = []string{"namespace", "daemonset"} + + daemonSetMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_daemonset_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + f := metrics.Family{} + + if !d.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_daemonset_created", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_current_number_scheduled", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes running at least one daemon pod and are supposed to.", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{ + &metrics.Metric{ + Name: "kube_daemonset_status_current_number_scheduled", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.Status.CurrentNumberScheduled), + }, + } + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_desired_number_scheduled", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes that should be running the daemon pod.", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_status_desired_number_scheduled", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.Status.DesiredNumberScheduled), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_number_available", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_status_number_available", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.Status.NumberAvailable), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_number_misscheduled", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes running a daemon pod but are not supposed to.", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_status_number_misscheduled", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.Status.NumberMisscheduled), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_number_ready", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_status_number_ready", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.Status.NumberReady), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_status_number_unavailable", + Type: metrics.MetricTypeGauge, + Help: "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_status_number_unavailable", + LabelKeys: []string{}, + LabelValues: []string{}, + 
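+ // int32 zero value, so the family always emits a sample, even when the
+ // API server leaves the field unset.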
Value: float64(d.Status.NumberUnavailable), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_updated_number_scheduled", + Type: metrics.MetricTypeGauge, + Help: "The total number of nodes that are running updated daemon pod", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_updated_number_scheduled", + Value: float64(d.Status.UpdatedNumberScheduled), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_daemonset_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "Sequence number representing a specific generation of the desired state.", + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_daemonset_metadata_generation", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(d.ObjectMeta.Generation), + }} + }), + }, + metrics.FamilyGenerator{ + Name: descDaemonSetLabelsName, + Type: metrics.MetricTypeGauge, + Help: descDaemonSetLabelsHelp, + GenerateFunc: wrapDaemonSetFunc(func(d *v1beta1.DaemonSet) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(d.ObjectMeta.Labels) + return metrics.Family{&metrics.Metric{ + Name: descDaemonSetLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + } +) + +func wrapDaemonSetFunc(f func(*v1beta1.DaemonSet) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + daemonSet := obj.(*v1beta1.DaemonSet) + + metricFamily := f(daemonSet) + + for _, m := range metricFamily { + m.LabelKeys = append(descDaemonSetLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{daemonSet.Namespace, daemonSet.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createDaemonSetListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.ExtensionsV1beta1().DaemonSets(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.ExtensionsV1beta1().DaemonSets(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/daemonset_test.go b/pkg/collectors/daemonset_test.go new file mode 100644 index 0000000000..23ecfd8549 --- /dev/null +++ b/pkg/collectors/daemonset_test.go @@ -0,0 +1,189 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestDaemonSetCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. 
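+ // Each case below lists the families it asserts on in MetricNames; the
+ // expected samples live in Want.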
+ const metadata = ` + # HELP kube_daemonset_created Unix creation timestamp + # TYPE kube_daemonset_created gauge + # HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state. + # TYPE kube_daemonset_metadata_generation gauge + # HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to. + # TYPE kube_daemonset_status_current_number_scheduled gauge + # HELP kube_daemonset_status_number_misscheduled The number of nodes running a daemon pod but are not supposed to. + # TYPE kube_daemonset_status_number_misscheduled gauge + # HELP kube_daemonset_status_desired_number_scheduled The number of nodes that should be running the daemon pod. + # TYPE kube_daemonset_status_desired_number_scheduled gauge + # HELP kube_daemonset_status_number_available The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available + # TYPE kube_daemonset_status_number_available gauge + # HELP kube_daemonset_status_number_ready The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready. + # TYPE kube_daemonset_status_number_ready gauge + # HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available + # TYPE kube_daemonset_status_number_unavailable gauge + # HELP kube_daemonset_updated_number_scheduled The total number of nodes that are running updated daemon pod + # TYPE kube_daemonset_updated_number_scheduled gauge + # HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_daemonset_labels gauge +` + cases := []generateMetricsTestCase{ + { + Obj: &v1beta1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds1", + Namespace: "ns1", + Labels: map[string]string{ + "app": "example1", + }, + Generation: 21, + }, + Status: v1beta1.DaemonSetStatus{ + CurrentNumberScheduled: 15, + NumberMisscheduled: 10, + DesiredNumberScheduled: 5, + NumberReady: 5, + }, + }, + Want: ` + kube_daemonset_metadata_generation{daemonset="ds1",namespace="ns1"} 21 + kube_daemonset_status_current_number_scheduled{daemonset="ds1",namespace="ns1"} 15 + kube_daemonset_status_desired_number_scheduled{daemonset="ds1",namespace="ns1"} 5 + kube_daemonset_status_number_available{daemonset="ds1",namespace="ns1"} 0 + kube_daemonset_status_number_misscheduled{daemonset="ds1",namespace="ns1"} 10 + kube_daemonset_status_number_ready{daemonset="ds1",namespace="ns1"} 5 + kube_daemonset_status_number_unavailable{daemonset="ds1",namespace="ns1"} 0 + kube_daemonset_updated_number_scheduled{daemonset="ds1",namespace="ns1"} 0 + kube_daemonset_labels{daemonset="ds1",label_app="example1",namespace="ns1"} 1 +`, + MetricNames: []string{ + "kube_daemonset_labels", + "kube_daemonset_metadata_generation", + "kube_daemonset_status_current_number_scheduled", + "kube_daemonset_status_desired_number_scheduled", + "kube_daemonset_status_number_available", + "kube_daemonset_status_number_misscheduled", + "kube_daemonset_status_number_ready", + "kube_daemonset_status_number_unavailable", + "kube_daemonset_updated_number_scheduled", + }, + }, + { + Obj: &v1beta1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds2", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns2", + Labels: map[string]string{ + "app": "example2", + }, + Generation: 14, + }, + Status: v1beta1.DaemonSetStatus{ + 
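+ // NumberAvailable, NumberUnavailable and UpdatedNumberScheduled are
+ // left at zero, so the expected output below reports 0 for those
+ // families.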
CurrentNumberScheduled: 10, + NumberMisscheduled: 5, + DesiredNumberScheduled: 0, + NumberReady: 0, + }, + }, + Want: ` + kube_daemonset_created{daemonset="ds2",namespace="ns2"} 1.5e+09 + kube_daemonset_metadata_generation{daemonset="ds2",namespace="ns2"} 14 + kube_daemonset_status_current_number_scheduled{daemonset="ds2",namespace="ns2"} 10 + kube_daemonset_status_desired_number_scheduled{daemonset="ds2",namespace="ns2"} 0 + kube_daemonset_status_number_available{daemonset="ds2",namespace="ns2"} 0 + kube_daemonset_status_number_misscheduled{daemonset="ds2",namespace="ns2"} 5 + kube_daemonset_status_number_ready{daemonset="ds2",namespace="ns2"} 0 + kube_daemonset_status_number_unavailable{daemonset="ds2",namespace="ns2"} 0 + kube_daemonset_updated_number_scheduled{daemonset="ds2",namespace="ns2"} 0 + kube_daemonset_labels{daemonset="ds2",label_app="example2",namespace="ns2"} 1 +`, + MetricNames: []string{ + "kube_daemonset_created", + "kube_daemonset_labels", + "kube_daemonset_metadata_generation", + "kube_daemonset_status_current_number_scheduled", + "kube_daemonset_status_desired_number_scheduled", + "kube_daemonset_status_number_available", + "kube_daemonset_status_number_misscheduled", + "kube_daemonset_status_number_ready", + "kube_daemonset_status_number_unavailable", + "kube_daemonset_updated_number_scheduled", + }, + }, + { + Obj: &v1beta1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds3", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns3", + Labels: map[string]string{ + "app": "example3", + }, + Generation: 15, + }, + Status: v1beta1.DaemonSetStatus{ + CurrentNumberScheduled: 10, + NumberMisscheduled: 5, + DesiredNumberScheduled: 15, + NumberReady: 5, + NumberAvailable: 5, + NumberUnavailable: 5, + UpdatedNumberScheduled: 5, + }, + }, + Want: ` + kube_daemonset_created{daemonset="ds3",namespace="ns3"} 1.5e+09 + kube_daemonset_metadata_generation{daemonset="ds3",namespace="ns3"} 15 + kube_daemonset_status_current_number_scheduled{daemonset="ds3",namespace="ns3"} 10 + kube_daemonset_status_desired_number_scheduled{daemonset="ds3",namespace="ns3"} 15 + kube_daemonset_status_number_available{daemonset="ds3",namespace="ns3"} 5 + kube_daemonset_status_number_misscheduled{daemonset="ds3",namespace="ns3"} 5 + kube_daemonset_status_number_ready{daemonset="ds3",namespace="ns3"} 5 + kube_daemonset_status_number_unavailable{daemonset="ds3",namespace="ns3"} 5 + kube_daemonset_updated_number_scheduled{daemonset="ds3",namespace="ns3"} 5 + kube_daemonset_labels{daemonset="ds3",label_app="example3",namespace="ns3"} 1 +`, + MetricNames: []string{ + "kube_daemonset_created", + "kube_daemonset_labels", + "kube_daemonset_metadata_generation", + "kube_daemonset_status_current_number_scheduled", + "kube_daemonset_status_desired_number_scheduled", + "kube_daemonset_status_number_available", + "kube_daemonset_status_number_misscheduled", + "kube_daemonset_status_number_ready", + "kube_daemonset_status_number_unavailable", + "kube_daemonset_updated_number_scheduled", + }, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(daemonSetMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/deployment.go b/pkg/collectors/deployment.go new file mode 100644 index 0000000000..c8739a8cbc --- /dev/null +++ b/pkg/collectors/deployment.go @@ -0,0 +1,223 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descDeploymentLabelsName = "kube_deployment_labels" + descDeploymentLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descDeploymentLabelsDefaultLabels = []string{"namespace", "deployment"} + + deploymentMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_deployment_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + f := metrics.Family{} + + if !d.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_deployment_created", + Value: float64(d.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_status_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of replicas per deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_status_replicas", + Value: float64(d.Status.Replicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_status_replicas_available", + Type: metrics.MetricTypeGauge, + Help: "The number of available replicas per deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_status_replicas_available", + Value: float64(d.Status.AvailableReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_status_replicas_unavailable", + Type: metrics.MetricTypeGauge, + Help: "The number of unavailable replicas per deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_status_replicas_unavailable", + Value: float64(d.Status.UnavailableReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_status_replicas_updated", + Type: metrics.MetricTypeGauge, + Help: "The number of updated replicas per deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_status_replicas_updated", + Value: float64(d.Status.UpdatedReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_status_observed_generation", + Type: metrics.MetricTypeGauge, + Help: "The generation observed by the deployment controller.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_status_observed_generation", + Value: 
float64(d.Status.ObservedGeneration), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_spec_replicas", + Type: metrics.MetricTypeGauge, + Help: "Number of desired pods for a deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_spec_replicas", + Value: float64(*d.Spec.Replicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_spec_paused", + Type: metrics.MetricTypeGauge, + Help: "Whether the deployment is paused and will not be processed by the deployment controller.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_spec_paused", + Value: boolFloat64(d.Spec.Paused), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_spec_strategy_rollingupdate_max_unavailable", + Type: metrics.MetricTypeGauge, + Help: "Maximum number of unavailable replicas during a rolling update of a deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + if d.Spec.Strategy.RollingUpdate == nil { + return metrics.Family{} + } + + maxUnavailable, err := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*d.Spec.Replicas), true) + if err != nil { + panic(err) + } + + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_spec_strategy_rollingupdate_max_unavailable", + Value: float64(maxUnavailable), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_spec_strategy_rollingupdate_max_surge", + Type: metrics.MetricTypeGauge, + Help: "Maximum number of replicas that can be scheduled above the desired number of replicas during a rolling update of a deployment.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + if d.Spec.Strategy.RollingUpdate == nil { + return metrics.Family{} + } + + maxSurge, err := intstr.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxSurge, int(*d.Spec.Replicas), true) + if err != nil { + panic(err) + } + + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_spec_strategy_rollingupdate_max_surge", + Value: float64(maxSurge), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_deployment_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "Sequence number representing a specific generation of the desired state.", + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_deployment_metadata_generation", + Value: float64(d.ObjectMeta.Generation), + }} + }), + }, + metrics.FamilyGenerator{ + Name: descDeploymentLabelsName, + Type: metrics.MetricTypeGauge, + Help: descDeploymentLabelsHelp, + GenerateFunc: wrapDeploymentFunc(func(d *v1beta1.Deployment) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(d.Labels) + return metrics.Family{&metrics.Metric{ + Name: descDeploymentLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + } +) + +func wrapDeploymentFunc(f func(*v1beta1.Deployment) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + deployment := obj.(*v1beta1.Deployment) + + metricFamily := f(deployment) + + for _, m := range metricFamily { + m.LabelKeys = append(descDeploymentLabelsDefaultLabels, m.LabelKeys...) 
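+ // Values are prepended in the same order as the keys above so the
+ // namespace/deployment labels stay aligned with their values.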
+ m.LabelValues = append([]string{deployment.Namespace, deployment.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createDeploymentListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.ExtensionsV1beta1().Deployments(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.ExtensionsV1beta1().Deployments(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/deployment_test.go b/pkg/collectors/deployment_test.go new file mode 100644 index 0000000000..144641e1ef --- /dev/null +++ b/pkg/collectors/deployment_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var ( + depl1Replicas int32 = 200 + depl2Replicas int32 = 5 + + depl1MaxUnavailable = intstr.FromInt(10) + depl2MaxUnavailable = intstr.FromString("20%") + + depl1MaxSurge = intstr.FromInt(10) + depl2MaxSurge = intstr.FromString("20%") +) + +func TestDeploymentCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_deployment_created Unix creation timestamp + # TYPE kube_deployment_created gauge + # HELP kube_deployment_metadata_generation Sequence number representing a specific generation of the desired state. + # TYPE kube_deployment_metadata_generation gauge + # HELP kube_deployment_spec_paused Whether the deployment is paused and will not be processed by the deployment controller. + # TYPE kube_deployment_spec_paused gauge + # HELP kube_deployment_spec_replicas Number of desired pods for a deployment. + # TYPE kube_deployment_spec_replicas gauge + # HELP kube_deployment_status_replicas The number of replicas per deployment. + # TYPE kube_deployment_status_replicas gauge + # HELP kube_deployment_status_replicas_available The number of available replicas per deployment. + # TYPE kube_deployment_status_replicas_available gauge + # HELP kube_deployment_status_replicas_unavailable The number of unavailable replicas per deployment. + # TYPE kube_deployment_status_replicas_unavailable gauge + # HELP kube_deployment_status_replicas_updated The number of updated replicas per deployment. + # TYPE kube_deployment_status_replicas_updated gauge + # HELP kube_deployment_status_observed_generation The generation observed by the deployment controller. + # TYPE kube_deployment_status_observed_generation gauge + # HELP kube_deployment_spec_strategy_rollingupdate_max_unavailable Maximum number of unavailable replicas during a rolling update of a deployment. 
+ # TYPE kube_deployment_spec_strategy_rollingupdate_max_unavailable gauge + # HELP kube_deployment_spec_strategy_rollingupdate_max_surge Maximum number of replicas that can be scheduled above the desired number of replicas during a rolling update of a deployment. + # TYPE kube_deployment_spec_strategy_rollingupdate_max_surge gauge + # HELP kube_deployment_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_deployment_labels gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "depl1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Labels: map[string]string{ + "app": "example1", + }, + Generation: 21, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 15, + AvailableReplicas: 10, + UnavailableReplicas: 5, + UpdatedReplicas: 2, + ObservedGeneration: 111, + }, + Spec: v1beta1.DeploymentSpec{ + Replicas: &depl1Replicas, + Strategy: v1beta1.DeploymentStrategy{ + RollingUpdate: &v1beta1.RollingUpdateDeployment{ + MaxUnavailable: &depl1MaxUnavailable, + MaxSurge: &depl1MaxSurge, + }, + }, + }, + }, + Want: ` + kube_deployment_created{deployment="depl1",namespace="ns1"} 1.5e+09 + kube_deployment_labels{deployment="depl1",label_app="example1",namespace="ns1"} 1 + kube_deployment_metadata_generation{deployment="depl1",namespace="ns1"} 21 + kube_deployment_spec_paused{deployment="depl1",namespace="ns1"} 0 + kube_deployment_spec_replicas{deployment="depl1",namespace="ns1"} 200 + kube_deployment_spec_strategy_rollingupdate_max_surge{deployment="depl1",namespace="ns1"} 10 + kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="depl1",namespace="ns1"} 10 + kube_deployment_status_observed_generation{deployment="depl1",namespace="ns1"} 111 + kube_deployment_status_replicas_available{deployment="depl1",namespace="ns1"} 10 + kube_deployment_status_replicas_unavailable{deployment="depl1",namespace="ns1"} 5 + kube_deployment_status_replicas_updated{deployment="depl1",namespace="ns1"} 2 + kube_deployment_status_replicas{deployment="depl1",namespace="ns1"} 15 +`, + }, + { + Obj: &v1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "depl2", + Namespace: "ns2", + Labels: map[string]string{ + "app": "example2", + }, + Generation: 14, + }, + Status: v1beta1.DeploymentStatus{ + Replicas: 10, + AvailableReplicas: 5, + UnavailableReplicas: 0, + UpdatedReplicas: 1, + ObservedGeneration: 1111, + }, + Spec: v1beta1.DeploymentSpec{ + Paused: true, + Replicas: &depl2Replicas, + Strategy: v1beta1.DeploymentStrategy{ + RollingUpdate: &v1beta1.RollingUpdateDeployment{ + MaxUnavailable: &depl2MaxUnavailable, + MaxSurge: &depl2MaxSurge, + }, + }, + }, + }, + Want: ` + kube_deployment_labels{deployment="depl2",label_app="example2",namespace="ns2"} 1 + kube_deployment_metadata_generation{deployment="depl2",namespace="ns2"} 14 + kube_deployment_spec_paused{deployment="depl2",namespace="ns2"} 1 + kube_deployment_spec_replicas{deployment="depl2",namespace="ns2"} 5 + kube_deployment_spec_strategy_rollingupdate_max_surge{deployment="depl2",namespace="ns2"} 1 + kube_deployment_spec_strategy_rollingupdate_max_unavailable{deployment="depl2",namespace="ns2"} 1 + kube_deployment_status_observed_generation{deployment="depl2",namespace="ns2"} 1111 + kube_deployment_status_replicas_available{deployment="depl2",namespace="ns2"} 5 + kube_deployment_status_replicas_unavailable{deployment="depl2",namespace="ns2"} 0 + kube_deployment_status_replicas_updated{deployment="depl2",namespace="ns2"} 
1 + kube_deployment_status_replicas{deployment="depl2",namespace="ns2"} 10 +`, + }, + } + + for i, c := range cases { + c.Func = composeMetricGenFuncs(deploymentMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/endpoint.go b/pkg/collectors/endpoint.go new file mode 100644 index 0000000000..212e660a80 --- /dev/null +++ b/pkg/collectors/endpoint.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descEndpointLabelsName = "kube_endpoint_labels" + descEndpointLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descEndpointLabelsDefaultLabels = []string{"namespace", "endpoint"} + + endpointMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_endpoint_info", + Type: metrics.MetricTypeGauge, + Help: "Information about endpoint.", + GenerateFunc: wrapEndpointFunc(func(e *v1.Endpoints) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_endpoint_info", + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_endpoint_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapEndpointFunc(func(e *v1.Endpoints) metrics.Family { + f := metrics.Family{} + + if !e.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_endpoint_created", + Value: float64(e.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: descEndpointLabelsName, + Type: metrics.MetricTypeGauge, + Help: descEndpointLabelsHelp, + GenerateFunc: wrapEndpointFunc(func(e *v1.Endpoints) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(e.Labels) + return metrics.Family{&metrics.Metric{ + Name: descEndpointLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_endpoint_address_available", + Type: metrics.MetricTypeGauge, + Help: "Number of addresses available in endpoint.", + GenerateFunc: wrapEndpointFunc(func(e *v1.Endpoints) metrics.Family { + var available int + for _, s := range e.Subsets { + available += len(s.Addresses) * len(s.Ports) + } + + return metrics.Family{&metrics.Metric{ + Name: "kube_endpoint_address_available", + Value: float64(available), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_endpoint_address_not_ready", + Type: metrics.MetricTypeGauge, + Help: "Number of addresses not ready in endpoint", + GenerateFunc: wrapEndpointFunc(func(e *v1.Endpoints) metrics.Family { + var notReady int + for _, s := range e.Subsets { + notReady += len(s.NotReadyAddresses) * 
len(s.Ports) + } + return metrics.Family{&metrics.Metric{ + Name: "kube_endpoint_address_not_ready", + Value: float64(notReady), + }} + }), + }, + } +) + +func wrapEndpointFunc(f func(*v1.Endpoints) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + endpoint := obj.(*v1.Endpoints) + + metricFamily := f(endpoint) + + for _, m := range metricFamily { + m.LabelKeys = append(descEndpointLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{endpoint.Namespace, endpoint.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createEndpointsListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().Endpoints(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().Endpoints(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/endpoint_test.go b/pkg/collectors/endpoint_test.go new file mode 100644 index 0000000000..198e375f83 --- /dev/null +++ b/pkg/collectors/endpoint_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +// TODO: Shouldn't this file be called endpoints? + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestEndpointCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_endpoint_address_available Number of addresses available in endpoint. + # TYPE kube_endpoint_address_available gauge + # HELP kube_endpoint_address_not_ready Number of addresses not ready in endpoint + # TYPE kube_endpoint_address_not_ready gauge + # HELP kube_endpoint_created Unix creation timestamp + # TYPE kube_endpoint_created gauge + # HELP kube_endpoint_info Information about endpoint. + # TYPE kube_endpoint_info gauge + # HELP kube_endpoint_labels Kubernetes labels converted to Prometheus labels. 
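The `wrapEndpointFunc` just above is the recurring pattern in this package for injecting identifying labels: every metric a generator returns gets `namespace` and `endpoint` prepended to its label keys and values, so the individual generators only supply their own extra labels. A self-contained sketch of the pattern, using simplified stand-in types rather than the real `pkg/metrics` package:

```go
package main

import "fmt"

// Simplified stand-ins for the pkg/metrics types used throughout this diff.
type Metric struct {
	Name        string
	LabelKeys   []string
	LabelValues []string
	Value       float64
}

type Family []*Metric

// wrapWithDefaults mirrors wrapEndpointFunc: it decorates a generator so
// that every returned metric carries the object's identifying labels first.
func wrapWithDefaults(defaultKeys []string, f func(ns, name string) Family) func(ns, name string) Family {
	return func(ns, name string) Family {
		family := f(ns, name)
		for _, m := range family {
			m.LabelKeys = append(defaultKeys, m.LabelKeys...)
			m.LabelValues = append([]string{ns, name}, m.LabelValues...)
		}
		return family
	}
}

func main() {
	gen := wrapWithDefaults([]string{"namespace", "endpoint"}, func(ns, name string) Family {
		return Family{&Metric{Name: "kube_endpoint_info", Value: 1}}
	})
	for _, m := range gen("default", "test-endpoint") {
		// Prints: kube_endpoint_info [namespace endpoint] [default test-endpoint] 1
		fmt.Println(m.Name, m.LabelKeys, m.LabelValues, m.Value)
	}
}
```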
+ # TYPE kube_endpoint_labels gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-endpoint", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "foobar", + }, + }, + Subsets: []v1.EndpointSubset{ + {Addresses: []v1.EndpointAddress{ + {IP: "127.0.0.1"}, {IP: "10.0.0.1"}, + }, + Ports: []v1.EndpointPort{ + {Port: 8080}, {Port: 8081}, + }, + }, + {Addresses: []v1.EndpointAddress{ + {IP: "172.22.23.202"}, + }, + Ports: []v1.EndpointPort{ + {Port: 8443}, {Port: 9090}, + }, + }, + {NotReadyAddresses: []v1.EndpointAddress{ + {IP: "192.168.1.1"}, + }, + Ports: []v1.EndpointPort{ + {Port: 1234}, {Port: 5678}, + }, + }, + {NotReadyAddresses: []v1.EndpointAddress{ + {IP: "192.168.1.3"}, {IP: "192.168.2.2"}, + }, + Ports: []v1.EndpointPort{ + {Port: 1234}, {Port: 5678}, + }, + }, + }, + }, + Want: ` + kube_endpoint_address_available{endpoint="test-endpoint",namespace="default"} 6 + kube_endpoint_address_not_ready{endpoint="test-endpoint",namespace="default"} 6 + kube_endpoint_created{endpoint="test-endpoint",namespace="default"} 1.5e+09 + kube_endpoint_info{endpoint="test-endpoint",namespace="default"} 1 + kube_endpoint_labels{endpoint="test-endpoint",label_app="foobar",namespace="default"} 1 + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(endpointMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/hpa.go b/pkg/collectors/hpa.go new file mode 100644 index 0000000000..2747b211e8 --- /dev/null +++ b/pkg/collectors/hpa.go @@ -0,0 +1,154 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + autoscaling "k8s.io/api/autoscaling/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descHorizontalPodAutoscalerLabelsName = "kube_hpa_labels" + descHorizontalPodAutoscalerLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
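The expected values of 6 available and 6 not-ready addresses in the endpoint test above follow directly from the per-subset products the two generators compute, `len(addresses) * len(ports)` summed over all subsets. A quick standalone check of that arithmetic:

```go
package main

import "fmt"

// Each endpoint subset contributes addresses × ports to the totals, exactly
// as the loops in kube_endpoint_address_available/_not_ready do.
type subset struct{ ready, notReady, ports int }

func main() {
	subsets := []subset{
		{ready: 2, ports: 2},    // 127.0.0.1, 10.0.0.1 on 8080, 8081
		{ready: 1, ports: 2},    // 172.22.23.202 on 8443, 9090
		{notReady: 1, ports: 2}, // 192.168.1.1 on 1234, 5678
		{notReady: 2, ports: 2}, // 192.168.1.3, 192.168.2.2 on 1234, 5678
	}
	available, notReady := 0, 0
	for _, s := range subsets {
		available += s.ready * s.ports
		notReady += s.notReady * s.ports
	}
	fmt.Println(available, notReady) // 6 6, matching the Want block above
}
```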
+ descHorizontalPodAutoscalerLabelsDefaultLabels = []string{"namespace", "hpa"} + + hpaMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_hpa_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "The generation observed by the HorizontalPodAutoscaler controller.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_hpa_metadata_generation", + Value: float64(a.ObjectMeta.Generation), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_hpa_spec_max_replicas", + Type: metrics.MetricTypeGauge, + Help: "Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_hpa_spec_max_replicas", + Value: float64(a.Spec.MaxReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_hpa_spec_min_replicas", + Type: metrics.MetricTypeGauge, + Help: "Lower limit for the number of pods that can be set by the autoscaler, default 1.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_hpa_spec_min_replicas", + Value: float64(*a.Spec.MinReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_hpa_status_current_replicas", + Type: metrics.MetricTypeGauge, + Help: "Current number of replicas of pods managed by this autoscaler.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_hpa_status_current_replicas", + Value: float64(a.Status.CurrentReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_hpa_status_desired_replicas", + Type: metrics.MetricTypeGauge, + Help: "Desired number of replicas of pods managed by this autoscaler.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_hpa_status_desired_replicas", + Value: float64(a.Status.DesiredReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: descHorizontalPodAutoscalerLabelsName, + Type: metrics.MetricTypeGauge, + Help: descHorizontalPodAutoscalerLabelsHelp, + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(a.Labels) + return metrics.Family{&metrics.Metric{ + Name: descHorizontalPodAutoscalerLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_hpa_status_condition", + Type: metrics.MetricTypeGauge, + Help: "The condition of this autoscaler.", + GenerateFunc: wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) metrics.Family { + f := metrics.Family{} + + for _, c := range a.Status.Conditions { + metrics := addConditionMetrics(c.Status) + + for _, m := range metrics { + metric := m + metric.Name = "kube_hpa_status_condition" + metric.LabelKeys = []string{"condition", "status"} + metric.LabelValues = append(metric.LabelValues, string(c.Type)) + f = append(f, metric) + } + } + + return f + }), + }, + } +) + +func wrapHPAFunc(f func(*autoscaling.HorizontalPodAutoscaler) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + hpa := obj.(*autoscaling.HorizontalPodAutoscaler) + + metricFamily := f(hpa) 
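`kube_hpa_status_condition` above, like the job condition families later in this diff, delegates to an `addConditionMetrics` helper that is not part of this hunk. Judging from the expected test output elsewhere (one series per `true`/`false`/`unknown` status, with only the matching status set to 1), its shape is presumably along these lines; treat this as a hypothetical sketch, not the upstream definition:

```go
// Hypothetical reconstruction of the shared addConditionMetrics helper,
// which lives elsewhere in the collectors package (it needs the core v1
// import for ConditionStatus). Each Kubernetes condition fans out into one
// metric per possible status, so absent statuses become explicit zeros.
func addConditionMetrics(cs v1.ConditionStatus) []*metrics.Metric {
	return []*metrics.Metric{
		{LabelValues: []string{"true"}, Value: boolFloat64(cs == v1.ConditionTrue)},
		{LabelValues: []string{"false"}, Value: boolFloat64(cs == v1.ConditionFalse)},
		{LabelValues: []string{"unknown"}, Value: boolFloat64(cs == v1.ConditionUnknown)},
	}
}
```

The callers then stamp on the metric name and label keys, as the loop in the condition generator above shows.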
+ + for _, m := range metricFamily { + m.LabelKeys = append(descHorizontalPodAutoscalerLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{hpa.Namespace, hpa.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createHPAListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/hpa_test.go b/pkg/collectors/hpa_test.go new file mode 100644 index 0000000000..51e123dcd3 --- /dev/null +++ b/pkg/collectors/hpa_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + + autoscaling "k8s.io/api/autoscaling/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + hpa1MinReplicas int32 = 2 +) + +func TestHPACollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_hpa_metadata_generation The generation observed by the HorizontalPodAutoscaler controller. + # TYPE kube_hpa_metadata_generation gauge + # HELP kube_hpa_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + # TYPE kube_hpa_spec_max_replicas gauge + # HELP kube_hpa_spec_min_replicas Lower limit for the number of pods that can be set by the autoscaler, default 1. + # TYPE kube_hpa_spec_min_replicas gauge + # HELP kube_hpa_status_current_replicas Current number of replicas of pods managed by this autoscaler. + # TYPE kube_hpa_status_current_replicas gauge + # HELP kube_hpa_status_desired_replicas Desired number of replicas of pods managed by this autoscaler. + # TYPE kube_hpa_status_desired_replicas gauge + ` + cases := []generateMetricsTestCase{ + { + // Verify populating base metrics. 
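`createHPAListWatch` above only builds the list/watch pair; the reflector machinery that consumes it sits outside this hunk. A hypothetical sketch of that consumption, assuming a client-go of the same vintage (`syncHPAStore` is an illustrative name, not part of this PR):

```go
// Illustrative only: driving a client-go reflector with the ListWatch
// returned by createHPAListWatch. Assumes the surrounding collectors
// package plus the autoscaling/v2beta1 import already present in hpa.go.
func syncHPAStore(kubeClient clientset.Interface, stopCh <-chan struct{}) cache.Store {
	lw := createHPAListWatch(kubeClient, metav1.NamespaceAll)
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	// The reflector lists once, then keeps the store in sync from the watch.
	r := cache.NewReflector(&lw, &autoscaling.HorizontalPodAutoscaler{}, store, 0)
	go r.Run(stopCh)
	return store
}
```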
+ Obj: &autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 2, + Name: "hpa1", + Namespace: "ns1", + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + MaxReplicas: 4, + MinReplicas: &hpa1MinReplicas, + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + APIVersion: "extensions/v1beta1", + Kind: "Deployment", + Name: "deployment1", + }, + }, + Status: autoscaling.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 2, + DesiredReplicas: 2, + }, + }, + Want: ` + kube_hpa_metadata_generation{hpa="hpa1",namespace="ns1"} 2 + kube_hpa_spec_max_replicas{hpa="hpa1",namespace="ns1"} 4 + kube_hpa_spec_min_replicas{hpa="hpa1",namespace="ns1"} 2 + kube_hpa_status_current_replicas{hpa="hpa1",namespace="ns1"} 2 + kube_hpa_status_desired_replicas{hpa="hpa1",namespace="ns1"} 2 + `, + MetricNames: []string{ + "kube_hpa_metadata_generation", + "kube_hpa_spec_max_replicas", + "kube_hpa_spec_min_replicas", + "kube_hpa_status_current_replicas", + "kube_hpa_status_desired_replicas", + }, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(hpaMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/job.go b/pkg/collectors/job.go new file mode 100644 index 0000000000..51bc30a00b --- /dev/null +++ b/pkg/collectors/job.go @@ -0,0 +1,265 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + v1batch "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descJobLabelsName = "kube_job_labels" + descJobLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
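The test harness wires `c.Func = composeMetricGenFuncs(...)` over a collector's family generators, as the loop at the end of the HPA test just above shows. The helper itself is defined elsewhere in the package; given how it is called, a plausible, purely hypothetical shape is:

```go
// Hypothetical sketch of composeMetricGenFuncs: fold every generator's
// GenerateFunc into one function, so a single object yields all of its
// metric families at once. The real signature may differ.
func composeMetricGenFuncs(generators []metrics.FamilyGenerator) func(obj interface{}) []metrics.Family {
	return func(obj interface{}) []metrics.Family {
		families := make([]metrics.Family, 0, len(generators))
		for _, g := range generators {
			families = append(families, g.GenerateFunc(obj))
		}
		return families
	}
}
```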
+ descJobLabelsDefaultLabels = []string{"namespace", "job_name"} + + jobMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: descJobLabelsName, + Type: metrics.MetricTypeGauge, + Help: descJobLabelsHelp, + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(j.Labels) + return metrics.Family{&metrics.Metric{ + Name: descJobLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_info", + Type: metrics.MetricTypeGauge, + Help: "Information about job.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_job_info", + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + if !j.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_job_created", + Value: float64(j.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_spec_parallelism", + Type: metrics.MetricTypeGauge, + Help: "The maximum desired number of pods the job should run at any given time.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + if j.Spec.Parallelism != nil { + f = append(f, &metrics.Metric{ + Name: "kube_job_spec_parallelism", + Value: float64(*j.Spec.Parallelism), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_spec_completions", + Type: metrics.MetricTypeGauge, + Help: "The desired number of successfully finished pods the job should be run with.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + if j.Spec.Completions != nil { + f = append(f, &metrics.Metric{ + Name: "kube_job_spec_completions", + Value: float64(*j.Spec.Completions), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_spec_active_deadline_seconds", + Type: metrics.MetricTypeGauge, + Help: "The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + if j.Spec.ActiveDeadlineSeconds != nil { + f = append(f, &metrics.Metric{ + Name: "kube_job_spec_active_deadline_seconds", + Value: float64(*j.Spec.ActiveDeadlineSeconds), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_status_succeeded", + Type: metrics.MetricTypeGauge, + Help: "The number of pods which reached Phase Succeeded.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_job_status_succeeded", + Value: float64(j.Status.Succeeded), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_status_failed", + Type: metrics.MetricTypeGauge, + Help: "The number of pods which reached Phase Failed.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_job_status_failed", + Value: float64(j.Status.Failed), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_status_active", + Type: metrics.MetricTypeGauge, + Help: "The number of actively running pods.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + return 
metrics.Family{&metrics.Metric{ + Name: "kube_job_status_active", + Value: float64(j.Status.Active), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_complete", + Type: metrics.MetricTypeGauge, + Help: "The job has completed its execution.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + for _, c := range j.Status.Conditions { + if c.Type == v1batch.JobComplete { + metrics := addConditionMetrics(c.Status) + for _, m := range metrics { + metric := m + metric.Name = "kube_job_complete" + metric.LabelKeys = []string{"condition"} + f = append(f, metric) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_failed", + Type: metrics.MetricTypeGauge, + Help: "The job has failed its execution.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + for _, c := range j.Status.Conditions { + if c.Type == v1batch.JobFailed { + metrics := addConditionMetrics(c.Status) + for _, m := range metrics { + metric := m + metric.Name = "kube_job_failed" + metric.LabelKeys = []string{"condition"} + f = append(f, m) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_status_start_time", + Type: metrics.MetricTypeGauge, + Help: "StartTime represents time when the job was acknowledged by the Job Manager.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + + if j.Status.StartTime != nil { + f = append(f, &metrics.Metric{ + Name: "kube_job_status_start_time", + Value: float64(j.Status.StartTime.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_job_status_completion_time", + Type: metrics.MetricTypeGauge, + Help: "CompletionTime represents time when the job was completed.", + GenerateFunc: wrapJobFunc(func(j *v1batch.Job) metrics.Family { + f := metrics.Family{} + if j.Status.CompletionTime != nil { + f = append(f, &metrics.Metric{ + Name: "kube_job_status_completion_time", + Value: float64(j.Status.CompletionTime.Unix()), + }) + } + + return f + }), + }, + } +) + +func wrapJobFunc(f func(*v1batch.Job) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + job := obj.(*v1batch.Job) + + metricFamily := f(job) + + for _, m := range metricFamily { + m.LabelKeys = append(descJobLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{job.Namespace, job.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createJobListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.BatchV1().Jobs(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.BatchV1().Jobs(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/job_test.go b/pkg/collectors/job_test.go new file mode 100644 index 0000000000..c717e2899a --- /dev/null +++ b/pkg/collectors/job_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + v1batch "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + Parallelism1 int32 = 1 + Completions1 int32 = 1 + ActiveDeadlineSeconds900 int64 = 900 + + RunningJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:00:07Z") + SuccessfulJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:00:07Z") + FailedJob1StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T14:00:07Z") + SuccessfulJob2StartTime, _ = time.Parse(time.RFC3339, "2017-05-26T12:10:07Z") + + SuccessfulJob1CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T13:00:07Z") + FailedJob1CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T15:00:07Z") + SuccessfulJob2CompletionTime, _ = time.Parse(time.RFC3339, "2017-05-26T13:10:07Z") +) + +func TestJobCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_job_created Unix creation timestamp + # TYPE kube_job_created gauge + # HELP kube_job_complete The job has completed its execution. + # TYPE kube_job_complete gauge + # HELP kube_job_failed The job has failed its execution. + # TYPE kube_job_failed gauge + # HELP kube_job_info Information about job. + # TYPE kube_job_info gauge + # HELP kube_job_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_job_labels gauge + # HELP kube_job_spec_active_deadline_seconds The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. + # TYPE kube_job_spec_active_deadline_seconds gauge + # HELP kube_job_spec_completions The desired number of successfully finished pods the job should be run with. + # TYPE kube_job_spec_completions gauge + # HELP kube_job_spec_parallelism The maximum desired number of pods the job should run at any given time. + # TYPE kube_job_spec_parallelism gauge + # HELP kube_job_status_active The number of actively running pods. + # TYPE kube_job_status_active gauge + # HELP kube_job_status_completion_time CompletionTime represents time when the job was completed. + # TYPE kube_job_status_completion_time gauge + # HELP kube_job_status_failed The number of pods which reached Phase Failed. + # TYPE kube_job_status_failed gauge + # HELP kube_job_status_start_time StartTime represents time when the job was acknowledged by the Job Manager. + # TYPE kube_job_status_start_time gauge + # HELP kube_job_status_succeeded The number of pods which reached Phase Succeeded. 
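The RFC3339 fixtures above become the e-notation values asserted in the Want blocks below; the conversion is just Unix seconds rendered as a float64. A standalone check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The job fixtures parse RFC3339 strings; the collector then exposes
	// StartTime/CompletionTime as float64 Unix seconds, which Go's default
	// float formatting (and the Prometheus text format) prints in e-notation.
	start, _ := time.Parse(time.RFC3339, "2017-05-26T12:00:07Z")
	completion, _ := time.Parse(time.RFC3339, "2017-05-26T13:00:07Z")
	fmt.Println(float64(start.Unix()))      // 1.495800007e+09
	fmt.Println(float64(completion.Unix())) // 1.495803607e+09
}
```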
+ # TYPE kube_job_status_succeeded gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "RunningJob1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-running-1", + }, + }, + Status: v1batch.JobStatus{ + Active: 1, + Failed: 0, + Succeeded: 0, + CompletionTime: nil, + StartTime: &metav1.Time{Time: RunningJob1StartTime}, + }, + Spec: v1batch.JobSpec{ + ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, + Parallelism: &Parallelism1, + Completions: &Completions1, + }, + }, + Want: ` + kube_job_created{job_name="RunningJob1",namespace="ns1"} 1.5e+09 + kube_job_info{job_name="RunningJob1",namespace="ns1"} 1 + kube_job_labels{job_name="RunningJob1",label_app="example-running-1",namespace="ns1"} 1 + kube_job_spec_active_deadline_seconds{job_name="RunningJob1",namespace="ns1"} 900 + kube_job_spec_completions{job_name="RunningJob1",namespace="ns1"} 1 + kube_job_spec_parallelism{job_name="RunningJob1",namespace="ns1"} 1 + kube_job_status_active{job_name="RunningJob1",namespace="ns1"} 1 + kube_job_status_failed{job_name="RunningJob1",namespace="ns1"} 0 + kube_job_status_start_time{job_name="RunningJob1",namespace="ns1"} 1.495800007e+09 + kube_job_status_succeeded{job_name="RunningJob1",namespace="ns1"} 0 +`, + }, + { + Obj: &v1batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "SuccessfulJob1", + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-successful-1", + }, + }, + Status: v1batch.JobStatus{ + Active: 0, + Failed: 0, + Succeeded: 1, + CompletionTime: &metav1.Time{Time: SuccessfulJob1CompletionTime}, + StartTime: &metav1.Time{Time: SuccessfulJob1StartTime}, + Conditions: []v1batch.JobCondition{ + {Type: v1batch.JobComplete, Status: v1.ConditionTrue}, + }, + }, + Spec: v1batch.JobSpec{ + ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, + Parallelism: &Parallelism1, + Completions: &Completions1, + }, + }, + Want: ` + kube_job_complete{condition="false",job_name="SuccessfulJob1",namespace="ns1"} 0 + kube_job_complete{condition="true",job_name="SuccessfulJob1",namespace="ns1"} 1 + kube_job_complete{condition="unknown",job_name="SuccessfulJob1",namespace="ns1"} 0 + kube_job_info{job_name="SuccessfulJob1",namespace="ns1"} 1 + kube_job_labels{job_name="SuccessfulJob1",label_app="example-successful-1",namespace="ns1"} 1 + kube_job_spec_active_deadline_seconds{job_name="SuccessfulJob1",namespace="ns1"} 900 + kube_job_spec_completions{job_name="SuccessfulJob1",namespace="ns1"} 1 + kube_job_spec_parallelism{job_name="SuccessfulJob1",namespace="ns1"} 1 + kube_job_status_active{job_name="SuccessfulJob1",namespace="ns1"} 0 + kube_job_status_completion_time{job_name="SuccessfulJob1",namespace="ns1"} 1.495803607e+09 + kube_job_status_failed{job_name="SuccessfulJob1",namespace="ns1"} 0 + kube_job_status_start_time{job_name="SuccessfulJob1",namespace="ns1"} 1.495800007e+09 + kube_job_status_succeeded{job_name="SuccessfulJob1",namespace="ns1"} 1 +`, + }, + { + Obj: &v1batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "FailedJob1", + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-failed-1", + }, + }, + Status: v1batch.JobStatus{ + Active: 0, + Failed: 1, + Succeeded: 0, + CompletionTime: &metav1.Time{Time: FailedJob1CompletionTime}, + StartTime: &metav1.Time{Time: FailedJob1StartTime}, + Conditions: []v1batch.JobCondition{ + {Type: v1batch.JobFailed, Status: v1.ConditionTrue}, + }, + 
}, + Spec: v1batch.JobSpec{ + ActiveDeadlineSeconds: &ActiveDeadlineSeconds900, + Parallelism: &Parallelism1, + Completions: &Completions1, + }, + }, + Want: ` + kube_job_failed{condition="false",job_name="FailedJob1",namespace="ns1"} 0 + kube_job_failed{condition="true",job_name="FailedJob1",namespace="ns1"} 1 + kube_job_failed{condition="unknown",job_name="FailedJob1",namespace="ns1"} 0 + kube_job_info{job_name="FailedJob1",namespace="ns1"} 1 + kube_job_labels{job_name="FailedJob1",label_app="example-failed-1",namespace="ns1"} 1 + kube_job_spec_active_deadline_seconds{job_name="FailedJob1",namespace="ns1"} 900 + kube_job_spec_completions{job_name="FailedJob1",namespace="ns1"} 1 + kube_job_spec_parallelism{job_name="FailedJob1",namespace="ns1"} 1 + kube_job_status_active{job_name="FailedJob1",namespace="ns1"} 0 + kube_job_status_completion_time{job_name="FailedJob1",namespace="ns1"} 1.495810807e+09 + kube_job_status_failed{job_name="FailedJob1",namespace="ns1"} 1 + kube_job_status_start_time{job_name="FailedJob1",namespace="ns1"} 1.495807207e+09 + kube_job_status_succeeded{job_name="FailedJob1",namespace="ns1"} 0 +`, + }, + { + Obj: &v1batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "SuccessfulJob2NoActiveDeadlineSeconds", + Namespace: "ns1", + Generation: 1, + Labels: map[string]string{ + "app": "example-successful-2", + }, + }, + Status: v1batch.JobStatus{ + Active: 0, + Failed: 0, + Succeeded: 1, + CompletionTime: &metav1.Time{Time: SuccessfulJob2CompletionTime}, + StartTime: &metav1.Time{Time: SuccessfulJob2StartTime}, + Conditions: []v1batch.JobCondition{ + {Type: v1batch.JobComplete, Status: v1.ConditionTrue}, + }, + }, + Spec: v1batch.JobSpec{ + ActiveDeadlineSeconds: nil, + Parallelism: &Parallelism1, + Completions: &Completions1, + }, + }, + Want: ` + kube_job_complete{condition="false",job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 + kube_job_complete{condition="true",job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 + + kube_job_complete{condition="unknown",job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 + kube_job_info{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 + kube_job_labels{job_name="SuccessfulJob2NoActiveDeadlineSeconds",label_app="example-successful-2",namespace="ns1"} 1 + kube_job_spec_completions{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 + kube_job_spec_parallelism{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 + kube_job_status_active{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 + kube_job_status_completion_time{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1.495804207e+09 + kube_job_status_failed{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 0 + kube_job_status_start_time{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1.495800607e+09 + kube_job_status_succeeded{job_name="SuccessfulJob2NoActiveDeadlineSeconds",namespace="ns1"} 1 +`, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(jobMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/limitrange.go b/pkg/collectors/limitrange.go new file mode 100644 index 0000000000..9d60b52877 --- /dev/null +++ b/pkg/collectors/limitrange.go @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descLimitRangeLabelsDefaultLabels = []string{"namespace", "limitrange"} + + limitRangeMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_limitrange", + Type: metrics.MetricTypeGauge, + Help: "Information about limit range.", + GenerateFunc: wrapLimitRangeFunc(func(r *v1.LimitRange) metrics.Family { + f := metrics.Family{} + + rawLimitRanges := r.Spec.Limits + for _, rawLimitRange := range rawLimitRanges { + for resource, min := range rawLimitRange.Min { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(resource), string(rawLimitRange.Type), "min"}, + Value: float64(min.MilliValue()) / 1000, + }) + } + + for resource, max := range rawLimitRange.Max { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(resource), string(rawLimitRange.Type), "max"}, + Value: float64(max.MilliValue()) / 1000, + }) + } + + for resource, df := range rawLimitRange.Default { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(resource), string(rawLimitRange.Type), "default"}, + Value: float64(df.MilliValue()) / 1000, + }) + } + + for resource, dfR := range rawLimitRange.DefaultRequest { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(resource), string(rawLimitRange.Type), "defaultRequest"}, + Value: float64(dfR.MilliValue()) / 1000, + }) + } + + for resource, mLR := range rawLimitRange.MaxLimitRequestRatio { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(resource), string(rawLimitRange.Type), "maxLimitRequestRatio"}, + Value: float64(mLR.MilliValue()) / 1000, + }) + } + } + + for _, m := range f { + m.Name = "kube_limitrange" + m.LabelKeys = []string{"resource", "type", "constraint"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_limitrange_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapLimitRangeFunc(func(r *v1.LimitRange) metrics.Family { + f := metrics.Family{} + + if !r.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_limitrange_created", + Value: float64(r.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + } +) + +func wrapLimitRangeFunc(f func(*v1.LimitRange) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + limitRange := obj.(*v1.LimitRange) + + metricFamily := f(limitRange) + + for _, m := range metricFamily { + m.LabelKeys = append(descLimitRangeLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{limitRange.Namespace, limitRange.Name}, m.LabelValues...) 
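Every quantity in the limit range above is exported as `MilliValue()/1000`, which preserves sub-unit resolution (for example a 250m CPU constraint) that a plain integer value would lose. For the `2.1G` fixture used in the test below, the arithmetic works out as:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "2.1G" is 2.1×10⁹ bytes; MilliValue() scales it to integer
	// milli-units, and dividing by 1000 recovers the float the
	// collector exports.
	q := resource.MustParse("2.1G")
	fmt.Println(q.MilliValue())                 // 2100000000000
	fmt.Println(float64(q.MilliValue()) / 1000) // 2.1e+09
}
```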
+ }
+
+ return metricFamily
+ }
+}
+
+func createLimitRangeListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch {
+ return cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ return kubeClient.CoreV1().LimitRanges(ns).List(opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ return kubeClient.CoreV1().LimitRanges(ns).Watch(opts)
+ },
+ }
+}
diff --git a/pkg/collectors/limitrange_test.go b/pkg/collectors/limitrange_test.go
new file mode 100644
index 0000000000..251f7c6c09
--- /dev/null
+++ b/pkg/collectors/limitrange_test.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collectors
+
+import (
+ "testing"
+ "time"
+
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestLimitRangeCollector(t *testing.T) {
+ // Fixed metadata on type and help text. We prepend this to every expected
+ // output so we only have to modify a single place when doing adjustments.
+ testMemory := "2.1G"
+ testMemoryQuantity := resource.MustParse(testMemory)
+ const metadata = `
+ # HELP kube_limitrange_created Unix creation timestamp
+ # TYPE kube_limitrange_created gauge
+ # HELP kube_limitrange Information about limit range.
+ # TYPE kube_limitrange gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Name: "quotaTest", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "testNS", + }, + Spec: v1.LimitRangeSpec{ + Limits: []v1.LimitRangeItem{ + { + Type: v1.LimitTypePod, + Max: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: testMemoryQuantity, + }, + Min: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: testMemoryQuantity, + }, + Default: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: testMemoryQuantity, + }, + DefaultRequest: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: testMemoryQuantity, + }, + MaxLimitRequestRatio: map[v1.ResourceName]resource.Quantity{ + v1.ResourceMemory: testMemoryQuantity, + }, + }, + }, + }, + }, + Want: ` + kube_limitrange_created{limitrange="quotaTest",namespace="testNS"} 1.5e+09 + kube_limitrange{constraint="default",limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod"} 2.1e+09 + kube_limitrange{constraint="defaultRequest",limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod"} 2.1e+09 + kube_limitrange{constraint="max",limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod"} 2.1e+09 + kube_limitrange{constraint="maxLimitRequestRatio",limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod"} 2.1e+09 + kube_limitrange{constraint="min",limitrange="quotaTest",namespace="testNS",resource="memory",type="Pod"} 2.1e+09 + + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(limitRangeMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/namespace.go b/pkg/collectors/namespace.go new file mode 100644 index 0000000000..d84e6c165c --- /dev/null +++ b/pkg/collectors/namespace.go @@ -0,0 +1,135 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descNamespaceLabelsName = "kube_namespace_labels" + descNamespaceLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descNamespaceLabelsDefaultLabels = []string{"namespace"} + + descNamespaceAnnotationsName = "kube_namespace_annotations" + descNamespaceAnnotationsHelp = "Kubernetes annotations converted to Prometheus labels." 
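`kubeLabelsToPrometheusLabels` and, for the namespace collector below, `kubeAnnotationsToPrometheusAnnotations` are shared helpers defined outside this hunk. The expected outputs (`label_app="example1"`, `annotation_app="example1"`) imply key prefixing plus sanitizing, presumably along these lines (hypothetical sketch):

```go
// Hypothetical reconstruction of the shared annotation helper used by the
// namespace collector; the label variant is the same with a "label_"
// prefix. sanitizeLabelName is another shared helper (sketched further
// down); ordering of the resulting pairs is left to the exposition layer.
func kubeAnnotationsToPrometheusAnnotations(annotations map[string]string) ([]string, []string) {
	keys := make([]string, 0, len(annotations))
	values := make([]string, 0, len(annotations))
	for k, v := range annotations {
		keys = append(keys, "annotation_"+sanitizeLabelName(k))
		values = append(values, v)
	}
	return keys, values
}
```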
+ descNamespaceAnnotationsDefaultLabels = []string{"namespace"} + + namespaceMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_namespace_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapNamespaceFunc(func(n *v1.Namespace) metrics.Family { + f := metrics.Family{} + if !n.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_namespace_created", + Value: float64(n.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: descNamespaceLabelsName, + Type: metrics.MetricTypeGauge, + Help: descNamespaceLabelsHelp, + GenerateFunc: wrapNamespaceFunc(func(n *v1.Namespace) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(n.Labels) + return metrics.Family{&metrics.Metric{ + Name: descNamespaceLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: descNamespaceAnnotationsName, + Type: metrics.MetricTypeGauge, + Help: descNamespaceAnnotationsHelp, + GenerateFunc: wrapNamespaceFunc(func(n *v1.Namespace) metrics.Family { + annotationKeys, annotationValues := kubeAnnotationsToPrometheusAnnotations(n.Annotations) + return metrics.Family{&metrics.Metric{ + Name: descNamespaceAnnotationsName, + LabelKeys: annotationKeys, + LabelValues: annotationValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_namespace_status_phase", + Type: metrics.MetricTypeGauge, + Help: "kubernetes namespace status phase.", + GenerateFunc: wrapNamespaceFunc(func(n *v1.Namespace) metrics.Family { + families := metrics.Family{ + &metrics.Metric{ + LabelValues: []string{string(v1.NamespaceActive)}, + Value: boolFloat64(n.Status.Phase == v1.NamespaceActive), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.NamespaceTerminating)}, + Value: boolFloat64(n.Status.Phase == v1.NamespaceTerminating), + }, + } + + for _, f := range families { + f.Name = "kube_namespace_status_phase" + f.LabelKeys = []string{"phase"} + } + + return families + }), + }, + } +) + +func wrapNamespaceFunc(f func(*v1.Namespace) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + namespace := obj.(*v1.Namespace) + + metricFamily := f(namespace) + + for _, m := range metricFamily { + m.LabelKeys = append(descNamespaceLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{namespace.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createNamespaceListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().Namespaces().List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().Namespaces().Watch(opts) + }, + } +} diff --git a/pkg/collectors/namespace_test.go b/pkg/collectors/namespace_test.go new file mode 100644 index 0000000000..962153656b --- /dev/null +++ b/pkg/collectors/namespace_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
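`boolFloat64`, used by the phase metrics above and by the node collector below, is one more small shared helper. Its presumed shape, since Prometheus gauges have no boolean type:

```go
// Presumed shape of the shared boolFloat64 helper: encode true/false as
// the 1/0 convention Prometheus gauges use for state metrics.
func boolFloat64(b bool) float64 {
	if b {
		return 1
	}
	return 0
}
```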
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNamespaceCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_namespace_created Unix creation timestamp + # TYPE kube_namespace_created gauge + # HELP kube_namespace_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_namespace_labels gauge + # HELP kube_namespace_annotations Kubernetes annotations converted to Prometheus labels. + # TYPE kube_namespace_annotations gauge + # HELP kube_namespace_status_phase kubernetes namespace status phase. + # TYPE kube_namespace_status_phase gauge + ` + + cases := []generateMetricsTestCase{ + { + Obj: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nsActiveTest", + }, + Spec: v1.NamespaceSpec{ + Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, + }, + Status: v1.NamespaceStatus{ + Phase: v1.NamespaceActive, + }, + }, + Want: ` + kube_namespace_labels{namespace="nsActiveTest"} 1 + kube_namespace_annotations{namespace="nsActiveTest"} 1 + kube_namespace_status_phase{namespace="nsActiveTest",phase="Active"} 1 + kube_namespace_status_phase{namespace="nsActiveTest",phase="Terminating"} 0 +`, + }, + { + Obj: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nsTerminateTest", + }, + Spec: v1.NamespaceSpec{ + Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, + }, + Status: v1.NamespaceStatus{ + Phase: v1.NamespaceTerminating, + }, + }, + Want: ` + kube_namespace_labels{namespace="nsTerminateTest"} 1 + kube_namespace_annotations{namespace="nsTerminateTest"} 1 + kube_namespace_status_phase{namespace="nsTerminateTest",phase="Active"} 0 + kube_namespace_status_phase{namespace="nsTerminateTest",phase="Terminating"} 1 +`, + }, + { + + Obj: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Labels: map[string]string{ + "app": "example1", + }, + Annotations: map[string]string{ + "app": "example1", + }, + }, + Spec: v1.NamespaceSpec{ + Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, + }, + Status: v1.NamespaceStatus{ + Phase: v1.NamespaceActive, + }, + }, + Want: ` + kube_namespace_created{namespace="ns1"} 1.5e+09 + kube_namespace_labels{label_app="example1",namespace="ns1"} 1 + kube_namespace_annotations{annotation_app="example1",namespace="ns1"} 1 + kube_namespace_status_phase{namespace="ns1",phase="Active"} 1 + kube_namespace_status_phase{namespace="ns1",phase="Terminating"} 0 +`, + }, + { + Obj: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ns2", + Labels: map[string]string{ + "app": "example2", + "l2": "label2", + }, + Annotations: map[string]string{ + "app": "example2", + "l2": "label2", + }, + }, + Spec: v1.NamespaceSpec{ + Finalizers: []v1.FinalizerName{v1.FinalizerKubernetes}, + }, + Status: v1.NamespaceStatus{ + Phase: v1.NamespaceActive, + }, + }, + Want: ` + 
kube_namespace_labels{label_app="example2",label_l2="label2",namespace="ns2"} 1 + kube_namespace_annotations{annotation_app="example2",annotation_l2="label2",namespace="ns2"} 1 + kube_namespace_status_phase{namespace="ns2",phase="Active"} 1 + kube_namespace_status_phase{namespace="ns2",phase="Terminating"} 0 +`, + }, + } + + for i, c := range cases { + c.Func = composeMetricGenFuncs(namespaceMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/node.go b/pkg/collectors/node.go new file mode 100644 index 0000000000..dbd48a30fe --- /dev/null +++ b/pkg/collectors/node.go @@ -0,0 +1,477 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/constant" + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" +) + +var ( + descNodeLabelsName = "kube_node_labels" + descNodeLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
+ descNodeLabelsDefaultLabels = []string{"node"} + + nodeMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_node_info", + Type: metrics.MetricTypeGauge, + Help: "Information about a cluster node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_node_info", + LabelKeys: []string{ + "kernel_version", + "os_image", + "container_runtime_version", + "kubelet_version", + "kubeproxy_version", + "provider_id", + }, + LabelValues: []string{ + n.Status.NodeInfo.KernelVersion, + n.Status.NodeInfo.OSImage, + n.Status.NodeInfo.ContainerRuntimeVersion, + n.Status.NodeInfo.KubeletVersion, + n.Status.NodeInfo.KubeProxyVersion, + n.Spec.ProviderID, + }, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + if !n.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_node_created", + Value: float64(n.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: descNodeLabelsName, + Type: metrics.MetricTypeGauge, + Help: descNodeLabelsHelp, + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(n.Labels) + return metrics.Family{&metrics.Metric{ + Name: descNodeLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_spec_unschedulable", + Type: metrics.MetricTypeGauge, + Help: "Whether a node can schedule new pods.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_node_spec_unschedulable", + Value: boolFloat64(n.Spec.Unschedulable), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_spec_taint", + Type: metrics.MetricTypeGauge, + Help: "The taint of a cluster node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + for _, taint := range n.Spec.Taints { + // Taints are applied to repel pods from nodes that do not have a corresponding + // toleration. Many node conditions are optionally reflected as taints + // by the node controller in order to simplify scheduling constraints. + f = append(f, &metrics.Metric{ + Name: "kube_node_spec_taint", + LabelKeys: []string{"key", "value", "effect"}, + LabelValues: []string{taint.Key, taint.Value, string(taint.Effect)}, + Value: 1, + }) + } + + return f + }), + }, + // This all-in-one metric family contains all conditions for extensibility. + // Third party plugin may report customized condition for cluster node + // (e.g. node-problem-detector), and Kubernetes may add new core + // conditions in future. + metrics.FamilyGenerator{ + Name: "kube_node_status_condition", + Type: metrics.MetricTypeGauge, + Help: "The condition of a cluster node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Collect node conditions and while default to false. + for _, c := range n.Status.Conditions { + ms := addConditionMetrics(c.Status) + for _, metric := range ms { + metric.Name = "kube_node_status_condition" + metric.LabelKeys = []string{"condition", "status"} + metric.LabelValues = append([]string{string(c.Type)}, metric.LabelValues...) + } + f = append(f, ms...) 
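A node typically reports several conditions at once, and the generator above multiplies each reported condition by every possible status, defaulting non-matching statuses to 0. A standalone illustration of the resulting series for a two-condition node:

```go
package main

import "fmt"

func main() {
	// Mirrors the fan-out in kube_node_status_condition: every reported
	// condition yields one series per possible status value, so two
	// conditions produce six series.
	conditions := map[string]string{"Ready": "true", "MemoryPressure": "false"}
	for cond, actual := range conditions {
		for _, status := range []string{"true", "false", "unknown"} {
			v := 0
			if status == actual {
				v = 1
			}
			fmt.Printf("kube_node_status_condition{condition=%q,status=%q} %d\n", cond, status, v)
		}
	}
}
```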
+ } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_phase", + Type: metrics.MetricTypeGauge, + Help: "The phase the node is currently in.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Set current phase to 1, others to 0 if it is set. + if p := n.Status.Phase; p != "" { + f = append(f, + &metrics.Metric{ + LabelValues: []string{string(v1.NodePending)}, + Value: boolFloat64(p == v1.NodePending), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.NodeRunning)}, + Value: boolFloat64(p == v1.NodeRunning), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.NodeTerminated)}, + Value: boolFloat64(p == v1.NodeTerminated), + }, + ) + } + + for _, metric := range f { + metric.Name = "kube_node_status_phase" + metric.LabelKeys = []string{"phase"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_capacity", + Type: metrics.MetricTypeGauge, + Help: "The capacity for different resources of a node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + capacity := n.Status.Capacity + for resourceName, val := range capacity { + switch resourceName { + case v1.ResourceCPU: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitCore), + }, + Value: float64(val.MilliValue()) / 1000, + }) + case v1.ResourceStorage: + fallthrough + case v1.ResourceEphemeralStorage: + fallthrough + case v1.ResourceMemory: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + case v1.ResourcePods: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitInteger), + }, + Value: float64(val.MilliValue()) / 1000, + }) + default: + if helper.IsHugePageResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + if helper.IsAttachableVolumeResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + if helper.IsExtendedResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitInteger), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + } + } + + for _, metric := range f { + metric.Name = "kube_node_status_capacity" + metric.LabelKeys = []string{"resource", "unit"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_capacity_pods", + Type: metrics.MetricTypeGauge, + Help: "The total pod resources of the node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if they are set. 
+ if v, ok := n.Status.Capacity[v1.ResourcePods]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_capacity_pods", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_capacity_cpu_cores", + Type: metrics.MetricTypeGauge, + Help: "The total CPU resources of the node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if they are set. + if v, ok := n.Status.Capacity[v1.ResourceCPU]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_capacity_cpu_cores", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_capacity_memory_bytes", + Type: metrics.MetricTypeGauge, + Help: "The total memory resources of the node.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if they are set. + if v, ok := n.Status.Capacity[v1.ResourceMemory]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_capacity_memory_bytes", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_allocatable", + Type: metrics.MetricTypeGauge, + Help: "The allocatable for different resources of a node that are available for scheduling.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + allocatable := n.Status.Allocatable + + for resourceName, val := range allocatable { + switch resourceName { + case v1.ResourceCPU: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitCore), + }, + Value: float64(val.MilliValue()) / 1000, + }) + case v1.ResourceStorage: + fallthrough + case v1.ResourceEphemeralStorage: + fallthrough + case v1.ResourceMemory: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + case v1.ResourcePods: + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitInteger), + }, + Value: float64(val.MilliValue()) / 1000, + }) + default: + if helper.IsHugePageResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + if helper.IsAttachableVolumeResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitByte), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + if helper.IsExtendedResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{ + sanitizeLabelName(string(resourceName)), + string(constant.UnitInteger), + }, + Value: float64(val.MilliValue()) / 1000, + }) + } + } + } + + for _, m := range f { + m.Name = "kube_node_status_allocatable" + m.LabelKeys = []string{"resource", "unit"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_allocatable_pods", + Type: metrics.MetricTypeGauge, + Help: "The pod resources of a node that are available for scheduling.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if 
they are set. + if v, ok := n.Status.Allocatable[v1.ResourcePods]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_allocatable_pods", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_allocatable_cpu_cores", + Type: metrics.MetricTypeGauge, + Help: "The CPU resources of a node that are available for scheduling.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if they are set. + if v, ok := n.Status.Allocatable[v1.ResourceCPU]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_allocatable_cpu_cores", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_node_status_allocatable_memory_bytes", + Type: metrics.MetricTypeGauge, + Help: "The memory resources of a node that are available for scheduling.", + GenerateFunc: wrapNodeFunc(func(n *v1.Node) metrics.Family { + f := metrics.Family{} + + // Add capacity and allocatable resources if they are set. + if v, ok := n.Status.Allocatable[v1.ResourceMemory]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_node_status_allocatable_memory_bytes", + Value: float64(v.MilliValue()) / 1000, + }) + } + + return f + }), + }, + } +) + +func wrapNodeFunc(f func(*v1.Node) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + node := obj.(*v1.Node) + + metricFamily := f(node) + + for _, m := range metricFamily { + m.LabelKeys = append(descNodeLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{node.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createNodeListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().Nodes().List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().Nodes().Watch(opts) + }, + } +} diff --git a/pkg/collectors/node_test.go b/pkg/collectors/node_test.go new file mode 100644 index 0000000000..d918dd829c --- /dev/null +++ b/pkg/collectors/node_test.go @@ -0,0 +1,310 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNodeCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_node_created Unix creation timestamp + # TYPE kube_node_created gauge + # HELP kube_node_info Information about a cluster node. + # TYPE kube_node_info gauge + # HELP kube_node_labels Kubernetes labels converted to Prometheus labels. 
+ # TYPE kube_node_labels gauge
+ # HELP kube_node_spec_unschedulable Whether a node can schedule new pods.
+ # TYPE kube_node_spec_unschedulable gauge
+ # HELP kube_node_spec_taint The taint of a cluster node.
+ # TYPE kube_node_spec_taint gauge
+ # HELP kube_node_status_phase The phase the node is currently in.
+ # TYPE kube_node_status_phase gauge
+ # HELP kube_node_status_capacity The capacity for different resources of a node.
+ # TYPE kube_node_status_capacity gauge
+ # HELP kube_node_status_capacity_pods The total pod resources of the node.
+ # TYPE kube_node_status_capacity_pods gauge
+ # HELP kube_node_status_capacity_cpu_cores The total CPU resources of the node.
+ # TYPE kube_node_status_capacity_cpu_cores gauge
+ # HELP kube_node_status_capacity_memory_bytes The total memory resources of the node.
+ # TYPE kube_node_status_capacity_memory_bytes gauge
+ # HELP kube_node_status_allocatable The allocatable for different resources of a node that are available for scheduling.
+ # TYPE kube_node_status_allocatable gauge
+ # HELP kube_node_status_allocatable_pods The pod resources of a node that are available for scheduling.
+ # TYPE kube_node_status_allocatable_pods gauge
+ # HELP kube_node_status_allocatable_cpu_cores The CPU resources of a node that are available for scheduling.
+ # TYPE kube_node_status_allocatable_cpu_cores gauge
+ # HELP kube_node_status_allocatable_memory_bytes The memory resources of a node that are available for scheduling.
+ # TYPE kube_node_status_allocatable_memory_bytes gauge
+ # HELP kube_node_status_condition The condition of a cluster node.
+ # TYPE kube_node_status_condition gauge
+ `
+ cases := []generateMetricsTestCase{
+ // Verify populating base metrics and that metrics for unset fields are skipped.
+ {
+ Obj: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "127.0.0.1",
+ },
+ Status: v1.NodeStatus{
+ NodeInfo: v1.NodeSystemInfo{
+ KernelVersion: "kernel",
+ KubeletVersion: "kubelet",
+ KubeProxyVersion: "kubeproxy",
+ OSImage: "osimage",
+ ContainerRuntimeVersion: "rkt",
+ },
+ },
+ Spec: v1.NodeSpec{
+ ProviderID: "provider://i-uniqueid",
+ },
+ },
+ Want: `
+ kube_node_info{container_runtime_version="rkt",kernel_version="kernel",kubelet_version="kubelet",kubeproxy_version="kubeproxy",node="127.0.0.1",os_image="osimage",provider_id="provider://i-uniqueid"} 1
+ kube_node_labels{node="127.0.0.1"} 1
+ kube_node_spec_unschedulable{node="127.0.0.1"} 0
+ `,
+ },
+ // Verify resource metrics.
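+ // The case below exercises both the generic families
+ // (kube_node_status_{capacity,allocatable} with "resource"/"unit" labels)
+ // and the older per-resource gauges. Resource names pass through
+ // sanitizeLabelName, so e.g. "nvidia.com/gpu" is expected to surface as
+ // "nvidia_com_gpu".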
+ { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Labels: map[string]string{ + "type": "master", + }, + }, + Spec: v1.NodeSpec{ + Unschedulable: true, + ProviderID: "provider://i-randomidentifier", + }, + Status: v1.NodeStatus{ + NodeInfo: v1.NodeSystemInfo{ + KernelVersion: "kernel", + KubeletVersion: "kubelet", + KubeProxyVersion: "kubeproxy", + OSImage: "osimage", + ContainerRuntimeVersion: "rkt", + }, + Capacity: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4.3"), + v1.ResourceMemory: resource.MustParse("2G"), + v1.ResourcePods: resource.MustParse("1000"), + v1.ResourceStorage: resource.MustParse("3G"), + v1.ResourceEphemeralStorage: resource.MustParse("4G"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("4"), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("3"), + v1.ResourceMemory: resource.MustParse("1G"), + v1.ResourcePods: resource.MustParse("555"), + v1.ResourceStorage: resource.MustParse("2G"), + v1.ResourceEphemeralStorage: resource.MustParse("3G"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), + }, + }, + }, + Want: ` + kube_node_created{node="127.0.0.1"} 1.5e+09 + kube_node_info{container_runtime_version="rkt",kernel_version="kernel",kubelet_version="kubelet",kubeproxy_version="kubeproxy",node="127.0.0.1",os_image="osimage",provider_id="provider://i-randomidentifier"} 1 + kube_node_labels{label_type="master",node="127.0.0.1"} 1 + kube_node_spec_unschedulable{node="127.0.0.1"} 1 + kube_node_status_allocatable_cpu_cores{node="127.0.0.1"} 3 + kube_node_status_allocatable_memory_bytes{node="127.0.0.1"} 1e+09 + kube_node_status_allocatable_pods{node="127.0.0.1"} 555 + kube_node_status_allocatable{node="127.0.0.1",resource="cpu",unit="core"} 3 + kube_node_status_allocatable{node="127.0.0.1",resource="ephemeral_storage",unit="byte"} 3e+09 + kube_node_status_allocatable{node="127.0.0.1",resource="memory",unit="byte"} 1e+09 + kube_node_status_allocatable{node="127.0.0.1",resource="nvidia_com_gpu",unit="integer"} 1 + kube_node_status_allocatable{node="127.0.0.1",resource="pods",unit="integer"} 555 + kube_node_status_allocatable{node="127.0.0.1",resource="storage",unit="byte"} 2e+09 + kube_node_status_capacity_cpu_cores{node="127.0.0.1"} 4.3 + kube_node_status_capacity_memory_bytes{node="127.0.0.1"} 2e+09 + kube_node_status_capacity_pods{node="127.0.0.1"} 1000 + kube_node_status_capacity{node="127.0.0.1",resource="cpu",unit="core"} 4.3 + kube_node_status_capacity{node="127.0.0.1",resource="ephemeral_storage",unit="byte"} 4e+09 + kube_node_status_capacity{node="127.0.0.1",resource="memory",unit="byte"} 2e+09 + kube_node_status_capacity{node="127.0.0.1",resource="nvidia_com_gpu",unit="integer"} 4 + kube_node_status_capacity{node="127.0.0.1",resource="pods",unit="integer"} 1000 + kube_node_status_capacity{node="127.0.0.1",resource="storage",unit="byte"} 3e+09 + `, + }, + // Verify phase enumerations. 
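+ // Phases are one-hot encoded: for a given node exactly one of the
+ // Pending/Running/Terminated series is 1. An illustrative PromQL query
+ // built on these names would be
+ //   sum(kube_node_status_phase{phase="Running"})
+ // to count nodes currently in the Running phase.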
+ { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.1", + }, + Status: v1.NodeStatus{ + Phase: v1.NodeRunning, + }, + }, + Want: ` + kube_node_status_phase{node="127.0.0.1",phase="Terminated"} 0 + kube_node_status_phase{node="127.0.0.1",phase="Running"} 1 + kube_node_status_phase{node="127.0.0.1",phase="Pending"} 0 +`, + MetricNames: []string{"kube_node_status_phase"}, + }, + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.2", + }, + Status: v1.NodeStatus{ + Phase: v1.NodePending, + }, + }, + Want: ` + kube_node_status_phase{node="127.0.0.2",phase="Terminated"} 0 + kube_node_status_phase{node="127.0.0.2",phase="Running"} 0 + kube_node_status_phase{node="127.0.0.2",phase="Pending"} 1 +`, + MetricNames: []string{"kube_node_status_phase"}, + }, + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.3", + }, + Status: v1.NodeStatus{ + Phase: v1.NodeTerminated, + }, + }, + Want: ` + kube_node_status_phase{node="127.0.0.3",phase="Terminated"} 1 + kube_node_status_phase{node="127.0.0.3",phase="Running"} 0 + kube_node_status_phase{node="127.0.0.3",phase="Pending"} 0 +`, + MetricNames: []string{"kube_node_status_phase"}, + }, + // Verify StatusCondition + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.1", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, + {Type: v1.NodeReady, Status: v1.ConditionTrue}, + {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionTrue}, + }, + }, + }, + Want: ` + kube_node_status_condition{condition="CustomizedType",node="127.0.0.1",status="false"} 0 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.1",status="true"} 1 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.1",status="unknown"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.1",status="false"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.1",status="true"} 1 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.1",status="unknown"} 0 + kube_node_status_condition{condition="Ready",node="127.0.0.1",status="false"} 0 + kube_node_status_condition{condition="Ready",node="127.0.0.1",status="true"} 1 + kube_node_status_condition{condition="Ready",node="127.0.0.1",status="unknown"} 0 +`, + MetricNames: []string{"kube_node_status_condition"}, + }, + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.2", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionUnknown}, + {Type: v1.NodeReady, Status: v1.ConditionUnknown}, + {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionUnknown}, + }, + }, + }, + Want: ` + kube_node_status_condition{condition="CustomizedType",node="127.0.0.2",status="false"} 0 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.2",status="true"} 0 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.2",status="unknown"} 1 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.2",status="false"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.2",status="true"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.2",status="unknown"} 1 + kube_node_status_condition{condition="Ready",node="127.0.0.2",status="false"} 0 + kube_node_status_condition{condition="Ready",node="127.0.0.2",status="true"} 0 + 
kube_node_status_condition{condition="Ready",node="127.0.0.2",status="unknown"} 1 +`, + MetricNames: []string{"kube_node_status_condition"}, + }, + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.3", + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse}, + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + {Type: v1.NodeConditionType("CustomizedType"), Status: v1.ConditionFalse}, + }, + }, + }, + Want: ` + kube_node_status_condition{condition="CustomizedType",node="127.0.0.3",status="false"} 1 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.3",status="true"} 0 + kube_node_status_condition{condition="CustomizedType",node="127.0.0.3",status="unknown"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.3",status="false"} 1 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.3",status="true"} 0 + kube_node_status_condition{condition="NetworkUnavailable",node="127.0.0.3",status="unknown"} 0 + kube_node_status_condition{condition="Ready",node="127.0.0.3",status="false"} 1 + kube_node_status_condition{condition="Ready",node="127.0.0.3",status="true"} 0 + kube_node_status_condition{condition="Ready",node="127.0.0.3",status="unknown"} 0 + `, + MetricNames: []string{"kube_node_status_condition"}, + }, + // Verify SpecTaints + { + Obj: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "127.0.0.1", + }, + Spec: v1.NodeSpec{ + Taints: []v1.Taint{ + {Key: "node.kubernetes.io/memory-pressure", Value: "true", Effect: v1.TaintEffectPreferNoSchedule}, + {Key: "Accelerated", Value: "gpu", Effect: v1.TaintEffectPreferNoSchedule}, + {Key: "Dedicated", Effect: v1.TaintEffectPreferNoSchedule}, + }, + }, + }, + Want: ` + kube_node_spec_taint{effect="PreferNoSchedule",key="Dedicated",node="127.0.0.1",value=""} 1 + kube_node_spec_taint{effect="PreferNoSchedule",key="Accelerated",node="127.0.0.1",value="gpu"} 1 + kube_node_spec_taint{effect="PreferNoSchedule",key="node.kubernetes.io/memory-pressure",node="127.0.0.1",value="true"} 1 + `, + MetricNames: []string{"kube_node_spec_taint"}, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(nodeMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/persistentvolume.go b/pkg/collectors/persistentvolume.go new file mode 100644 index 0000000000..f58e311268 --- /dev/null +++ b/pkg/collectors/persistentvolume.go @@ -0,0 +1,131 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descPersistentVolumeLabelsName = "kube_persistentvolume_labels" + descPersistentVolumeLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descPersistentVolumeLabelsDefaultLabels = []string{"persistentvolume"} + + persistentVolumeMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: descPersistentVolumeLabelsName, + Type: metrics.MetricTypeGauge, + Help: descPersistentVolumeLabelsHelp, + GenerateFunc: wrapPersistentVolumeFunc(func(p *v1.PersistentVolume) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(p.Labels) + return metrics.Family{&metrics.Metric{ + Name: descPersistentVolumeLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_persistentvolume_status_phase", + Type: metrics.MetricTypeGauge, + Help: "The phase indicates if a volume is available, bound to a claim, or released by a claim.", + GenerateFunc: wrapPersistentVolumeFunc(func(p *v1.PersistentVolume) metrics.Family { + f := metrics.Family{} + + // Set current phase to 1, others to 0 if it is set. + if p := p.Status.Phase; p != "" { + f = append(f, + &metrics.Metric{ + LabelValues: []string{string(v1.VolumePending)}, + Value: boolFloat64(p == v1.VolumePending), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.VolumeAvailable)}, + Value: boolFloat64(p == v1.VolumeAvailable), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.VolumeBound)}, + Value: boolFloat64(p == v1.VolumeBound), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.VolumeReleased)}, + Value: boolFloat64(p == v1.VolumeReleased), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.VolumeFailed)}, + Value: boolFloat64(p == v1.VolumeFailed), + }, + ) + } + + for _, m := range f { + m.Name = "kube_persistentvolume_status_phase" + m.LabelKeys = []string{"phase"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_persistentvolume_info", + Type: metrics.MetricTypeGauge, + Help: "Information about persistentvolume.", + GenerateFunc: wrapPersistentVolumeFunc(func(p *v1.PersistentVolume) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_persistentvolume_info", + LabelKeys: []string{"storageclass"}, + LabelValues: []string{p.Spec.StorageClassName}, + Value: 1, + }} + }), + }, + } +) + +func wrapPersistentVolumeFunc(f func(*v1.PersistentVolume) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + persistentVolume := obj.(*v1.PersistentVolume) + + metricFamily := f(persistentVolume) + + for _, m := range metricFamily { + m.LabelKeys = append(descPersistentVolumeLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{persistentVolume.Name}, m.LabelValues...) 
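+ // Each generator above only declares its own labels; this wrapper
+ // prepends the default "persistentvolume" label key and the volume's
+ // name in matching order, keeping keys and values aligned.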
+ } + + return metricFamily + } +} + +func createPersistentVolumeListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().PersistentVolumes().List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().PersistentVolumes().Watch(opts) + }, + } +} diff --git a/pkg/collectors/persistentvolume_test.go b/pkg/collectors/persistentvolume_test.go new file mode 100644 index 0000000000..3e64abe41c --- /dev/null +++ b/pkg/collectors/persistentvolume_test.go @@ -0,0 +1,210 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPersistentVolumeCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_persistentvolume_status_phase The phase indicates if a volume is available, bound to a claim, or released by a claim. + # TYPE kube_persistentvolume_status_phase gauge + # HELP kube_persistentvolume_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_persistentvolume_labels gauge + # HELP kube_persistentvolume_info Information about persistentvolume. + # TYPE kube_persistentvolume_info gauge + ` + cases := []generateMetricsTestCase{ + // Verify phase enumerations. 
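+ // Each case pins MetricNames so that only the family under test is
+ // compared against Want; other families generated for the object are
+ // ignored.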
+ { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-pending", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumePending, + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Available"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Bound"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Failed"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Pending"} 1 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Released"} 0 +`, + MetricNames: []string{ + "kube_persistentvolume_status_phase", + }, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-available", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Available"} 1 + kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Bound"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Failed"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Pending"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-available",phase="Released"} 0 +`, + MetricNames: []string{"kube_persistentvolume_status_phase"}, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-bound", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Available"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Bound"} 1 + kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Failed"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Pending"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-bound",phase="Released"} 0 +`, + MetricNames: []string{"kube_persistentvolume_status_phase"}, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-released", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeReleased, + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Available"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Bound"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Failed"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Pending"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-released",phase="Released"} 1 +`, + MetricNames: []string{"kube_persistentvolume_status_phase"}, + }, + { + + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-failed", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeFailed, + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Available"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Bound"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Failed"} 1 + kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Pending"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-failed",phase="Released"} 0 +`, + MetricNames: []string{"kube_persistentvolume_status_phase"}, + }, 
+ { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-pending", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumePending, + }, + Spec: v1.PersistentVolumeSpec{ + StorageClassName: "test", + }, + }, + Want: ` + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Available"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Bound"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Failed"} 0 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Pending"} 1 + kube_persistentvolume_status_phase{persistentvolume="test-pv-pending",phase="Released"} 0 +`, + MetricNames: []string{ + "kube_persistentvolume_status_phase", + }, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-available", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, + }, + Want: ` + kube_persistentvolume_info{persistentvolume="test-pv-available",storageclass=""} 1 + `, + MetricNames: []string{"kube_persistentvolume_info"}, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-labeled-pv", + Labels: map[string]string{ + "app": "mysql-server", + }, + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumePending, + }, + Spec: v1.PersistentVolumeSpec{ + StorageClassName: "test", + }, + }, + Want: ` + kube_persistentvolume_labels{label_app="mysql-server",persistentvolume="test-labeled-pv"} 1 + `, + MetricNames: []string{"kube_persistentvolume_labels"}, + }, + { + Obj: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-unlabeled-pv", + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, + }, + Want: ` + kube_persistentvolume_labels{persistentvolume="test-unlabeled-pv"} 1 + `, + MetricNames: []string{"kube_persistentvolume_labels"}, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(persistentVolumeMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/persistentvolumeclaim.go b/pkg/collectors/persistentvolumeclaim.go new file mode 100644 index 0000000000..741aaeb6cd --- /dev/null +++ b/pkg/collectors/persistentvolumeclaim.go @@ -0,0 +1,156 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descPersistentVolumeClaimLabelsName = "kube_persistentvolumeclaim_labels" + descPersistentVolumeClaimLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
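+ // PersistentVolumeClaims are namespaced (unlike the cluster-scoped
+ // PersistentVolumes above), so the default labels carry the namespace in
+ // addition to the claim name.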
+ descPersistentVolumeClaimLabelsDefaultLabels = []string{"namespace", "persistentvolumeclaim"} + + persistentVolumeClaimMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: descPersistentVolumeClaimLabelsName, + Type: metrics.MetricTypeGauge, + Help: descPersistentVolumeClaimLabelsHelp, + GenerateFunc: wrapPersistentVolumeClaimFunc(func(p *v1.PersistentVolumeClaim) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(p.Labels) + return metrics.Family{&metrics.Metric{ + Name: descPersistentVolumeClaimLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_persistentvolumeclaim_info", + Type: metrics.MetricTypeGauge, + Help: "Information about persistent volume claim.", + GenerateFunc: wrapPersistentVolumeClaimFunc(func(p *v1.PersistentVolumeClaim) metrics.Family { + storageClassName := getPersistentVolumeClaimClass(p) + volumeName := p.Spec.VolumeName + return metrics.Family{&metrics.Metric{ + Name: "kube_persistentvolumeclaim_info", + LabelKeys: []string{"storageclass", "volumename"}, + LabelValues: []string{storageClassName, volumeName}, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_persistentvolumeclaim_status_phase", + Type: metrics.MetricTypeGauge, + Help: "The phase the persistent volume claim is currently in.", + GenerateFunc: wrapPersistentVolumeClaimFunc(func(p *v1.PersistentVolumeClaim) metrics.Family { + f := metrics.Family{} + // Set current phase to 1, others to 0 if it is set. + if p := p.Status.Phase; p != "" { + f = append(f, + &metrics.Metric{ + LabelValues: []string{string(v1.ClaimLost)}, + Value: boolFloat64(p == v1.ClaimLost), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.ClaimBound)}, + Value: boolFloat64(p == v1.ClaimBound), + }, + &metrics.Metric{ + LabelValues: []string{string(v1.ClaimPending)}, + Value: boolFloat64(p == v1.ClaimPending), + }, + ) + } + + for _, m := range f { + m.Name = "kube_persistentvolumeclaim_status_phase" + m.LabelKeys = []string{"phase"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_persistentvolumeclaim_resource_requests_storage_bytes", + Type: metrics.MetricTypeGauge, + Help: "The capacity of storage requested by the persistent volume claim.", + GenerateFunc: wrapPersistentVolumeClaimFunc(func(p *v1.PersistentVolumeClaim) metrics.Family { + f := metrics.Family{} + if storage, ok := p.Spec.Resources.Requests[v1.ResourceStorage]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_persistentvolumeclaim_resource_requests_storage_bytes", + Value: float64(storage.Value()), + }) + } + + return f + }), + }, + } +) + +func wrapPersistentVolumeClaimFunc(f func(*v1.PersistentVolumeClaim) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + persistentVolumeClaim := obj.(*v1.PersistentVolumeClaim) + + metricFamily := f(persistentVolumeClaim) + + for _, m := range metricFamily { + m.LabelKeys = append(descPersistentVolumeClaimLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{persistentVolumeClaim.Namespace, persistentVolumeClaim.Name}, m.LabelValues...) 
+ }
+
+ return metricFamily
+ }
+}
+
+func createPersistentVolumeClaimListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch {
+ return cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ return kubeClient.CoreV1().PersistentVolumeClaims(ns).List(opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ return kubeClient.CoreV1().PersistentVolumeClaims(ns).Watch(opts)
+ },
+ }
+}
+
+// getPersistentVolumeClaimClass returns StorageClassName. If no storage class was
+// requested, it returns "".
+func getPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
+ // Use beta annotation first
+ if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
+ return class
+ }
+
+ if claim.Spec.StorageClassName != nil {
+ return *claim.Spec.StorageClassName
+ }
+
+ // No storage class was requested; return the empty string.
+ return ""
+}
diff --git a/pkg/collectors/persistentvolumeclaim_test.go b/pkg/collectors/persistentvolumeclaim_test.go
new file mode 100644
index 0000000000..49a1e612c0
--- /dev/null
+++ b/pkg/collectors/persistentvolumeclaim_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2017 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collectors
+
+import (
+ "testing"
+
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestPersistentVolumeClaimCollector(t *testing.T) {
+ // Fixed metadata on type and help text. We prepend this to every expected
+ // output so we only have to modify a single place when doing adjustments.
+ const metadata = `
+ # HELP kube_persistentvolumeclaim_info Information about persistent volume claim.
+ # TYPE kube_persistentvolumeclaim_info gauge
+ # HELP kube_persistentvolumeclaim_labels Kubernetes labels converted to Prometheus labels.
+ # TYPE kube_persistentvolumeclaim_labels gauge
+ # HELP kube_persistentvolumeclaim_status_phase The phase the persistent volume claim is currently in.
+ # TYPE kube_persistentvolumeclaim_status_phase gauge
+ # HELP kube_persistentvolumeclaim_resource_requests_storage_bytes The capacity of storage requested by the persistent volume claim.
+ # TYPE kube_persistentvolumeclaim_resource_requests_storage_bytes gauge
+ `
+ storageClassName := "rbd"
+ cases := []generateMetricsTestCase{
+ // Verify phase enumerations.
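+ // The storage request below is declared as "1Gi", i.e. 2^30 = 1073741824
+ // bytes, which the exposition format renders as 1.073741824e+09.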
+ { + Obj: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mysql-data", + Namespace: "default", + Labels: map[string]string{ + "app": "mysql-server", + }, + }, + Spec: v1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + VolumeName: "pvc-mysql-data", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + Want: ` + kube_persistentvolumeclaim_info{namespace="default",persistentvolumeclaim="mysql-data",storageclass="rbd",volumename="pvc-mysql-data"} 1 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Bound"} 1 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Lost"} 0 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="mysql-data",phase="Pending"} 0 + kube_persistentvolumeclaim_resource_requests_storage_bytes{namespace="default",persistentvolumeclaim="mysql-data"} 1.073741824e+09 + kube_persistentvolumeclaim_labels{label_app="mysql-server",namespace="default",persistentvolumeclaim="mysql-data"} 1 +`, + MetricNames: []string{"kube_persistentvolumeclaim_info", "kube_persistentvolumeclaim_status_phase", "kube_persistentvolumeclaim_resource_requests_storage_bytes", "kube_persistentvolumeclaim_labels"}, + }, + { + Obj: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "prometheus-data", + Namespace: "default", + }, + Spec: v1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, + VolumeName: "pvc-prometheus-data", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimPending, + }, + }, + Want: ` + kube_persistentvolumeclaim_info{namespace="default",persistentvolumeclaim="prometheus-data",storageclass="rbd",volumename="pvc-prometheus-data"} 1 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Bound"} 0 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Lost"} 0 + kube_persistentvolumeclaim_status_phase{namespace="default",persistentvolumeclaim="prometheus-data",phase="Pending"} 1 + kube_persistentvolumeclaim_labels{namespace="default",persistentvolumeclaim="prometheus-data"} 1 + `, + MetricNames: []string{"kube_persistentvolumeclaim_info", "kube_persistentvolumeclaim_status_phase", "kube_persistentvolumeclaim_resource_requests_storage_bytes", "kube_persistentvolumeclaim_labels"}, + }, + { + Obj: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongo-data", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimLost, + }, + }, + Want: ` + kube_persistentvolumeclaim_info{namespace="",persistentvolumeclaim="mongo-data",storageclass="",volumename=""} 1 + kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Bound"} 0 + kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Lost"} 1 + kube_persistentvolumeclaim_status_phase{namespace="",persistentvolumeclaim="mongo-data",phase="Pending"} 0 + kube_persistentvolumeclaim_labels{namespace="",persistentvolumeclaim="mongo-data"} 1 +`, + MetricNames: []string{"kube_persistentvolumeclaim_info", "kube_persistentvolumeclaim_status_phase", "kube_persistentvolumeclaim_resource_requests_storage_bytes", "kube_persistentvolumeclaim_labels"}, + }, + } + for i, c := range cases { 
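+ // composeMetricGenFuncs combines all PVC family generators into the
+ // single generate function each test case runs, much as the collector
+ // composes them at runtime.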
+ c.Func = composeMetricGenFuncs(persistentVolumeClaimMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/pod.go b/pkg/collectors/pod.go new file mode 100644 index 0000000000..50b63bf997 --- /dev/null +++ b/pkg/collectors/pod.go @@ -0,0 +1,766 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "strconv" + + "k8s.io/kube-state-metrics/pkg/constant" + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/util/node" +) + +var ( + descPodLabelsDefaultLabels = []string{"namespace", "pod"} + containerWaitingReasons = []string{"ContainerCreating", "CrashLoopBackOff", "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff"} + containerTerminatedReasons = []string{"OOMKilled", "Completed", "Error", "ContainerCannotRun"} + + podMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_pod_info", + Type: metrics.MetricTypeGauge, + Help: "Information about pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + createdBy := metav1.GetControllerOf(p) + createdByKind := "" + createdByName := "" + if createdBy != nil { + if createdBy.Kind != "" { + createdByKind = createdBy.Kind + } + if createdBy.Name != "" { + createdByName = createdBy.Name + } + } + + m := metrics.Metric{ + Name: "kube_pod_info", + LabelKeys: []string{"host_ip", "pod_ip", "uid", "node", "created_by_kind", "created_by_name"}, + LabelValues: []string{p.Status.HostIP, p.Status.PodIP, string(p.UID), p.Spec.NodeName, createdByKind, createdByName}, + Value: 1, + } + + return metrics.Family{&m} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_start_time", + Type: metrics.MetricTypeGauge, + Help: "Start time in unix timestamp for a pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + if p.Status.StartTime != nil { + f = append(f, &metrics.Metric{ + Name: "kube_pod_start_time", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64((*(p.Status.StartTime)).Unix()), + }) + } + + return f + + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_completion_time", + Type: metrics.MetricTypeGauge, + Help: "Completion time in unix timestamp for a pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + var lastFinishTime float64 + for _, cs := range p.Status.ContainerStatuses { + if cs.State.Terminated != nil { + if lastFinishTime == 0 || lastFinishTime < float64(cs.State.Terminated.FinishedAt.Unix()) { + lastFinishTime = float64(cs.State.Terminated.FinishedAt.Unix()) + } + } + } + + if lastFinishTime > 0 { + f = append(f, &metrics.Metric{ + Name: 
"kube_pod_completion_time", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: lastFinishTime, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_owner", + Type: metrics.MetricTypeGauge, + Help: "Information about the Pod's owner.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + labelKeys := []string{"owner_kind", "owner_name", "owner_is_controller"} + f := metrics.Family{} + + owners := p.GetOwnerReferences() + if len(owners) == 0 { + f = append(f, &metrics.Metric{ + Name: "kube_pod_owner", + LabelKeys: labelKeys, + LabelValues: []string{"", "", ""}, + Value: 1, + }) + } else { + for _, owner := range owners { + if owner.Controller != nil { + f = append(f, &metrics.Metric{ + Name: "kube_pod_owner", + LabelKeys: labelKeys, + LabelValues: []string{owner.Kind, owner.Name, strconv.FormatBool(*owner.Controller)}, + Value: 1, + }) + } else { + f = append(f, &metrics.Metric{ + Name: "kube_pod_owner", + LabelKeys: labelKeys, + LabelValues: []string{owner.Kind, owner.Name, "false"}, + Value: 1, + }) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_labels", + Type: metrics.MetricTypeGauge, + Help: "Kubernetes labels converted to Prometheus labels.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(p.Labels) + m := metrics.Metric{ + Name: "kube_pod_labels", + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + } + return metrics.Family{&m} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + if !p.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_pod_created", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(p.CreationTimestamp.Unix()), + }) + } + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_status_scheduled_time", + Type: metrics.MetricTypeGauge, + Help: "Unix timestamp when pod moved into scheduled status", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Status.Conditions { + switch c.Type { + case v1.PodScheduled: + if c.Status == v1.ConditionTrue { + f = append(f, &metrics.Metric{ + Name: "kube_pod_status_scheduled_time", + LabelKeys: []string{}, + LabelValues: []string{}, + Value: float64(c.LastTransitionTime.Unix()), + }) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_status_phase", + Type: metrics.MetricTypeGauge, + Help: "The pods current phase.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + phase := p.Status.Phase + if phase == "" { + return f + } + + phases := []struct { + v bool + n string + }{ + {phase == v1.PodPending, string(v1.PodPending)}, + {phase == v1.PodSucceeded, string(v1.PodSucceeded)}, + {phase == v1.PodFailed, string(v1.PodFailed)}, + // This logic is directly copied from: https://github.com/kubernetes/kubernetes/blob/d39bfa0d138368bbe72b0eaf434501dcb4ec9908/pkg/printers/internalversion/printers.go#L597-L601 + // For more info, please go to: https://github.com/kubernetes/kube-state-metrics/issues/410 + {phase == v1.PodRunning && !(p.DeletionTimestamp != nil && p.Status.Reason == node.NodeUnreachablePodReason), string(v1.PodRunning)}, + {phase == v1.PodUnknown || (p.DeletionTimestamp != nil && p.Status.Reason == node.NodeUnreachablePodReason), 
string(v1.PodUnknown)}, + } + + for _, p := range phases { + f = append(f, &metrics.Metric{ + Name: "kube_pod_status_phase", + LabelKeys: []string{"phase"}, + LabelValues: []string{p.n}, + Value: boolFloat64(p.v), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_status_ready", + Type: metrics.MetricTypeGauge, + Help: "Describes whether the pod is ready to serve requests.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Status.Conditions { + switch c.Type { + case v1.PodReady: + ms := addConditionMetrics(c.Status) + + for _, m := range ms { + metric := m + metric.Name = "kube_pod_status_ready" + metric.LabelKeys = []string{"condition"} + f = append(f, metric) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_status_scheduled", + Type: metrics.MetricTypeGauge, + Help: "Describes the status of the scheduling process for the pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Status.Conditions { + switch c.Type { + case v1.PodScheduled: + ms := addConditionMetrics(c.Status) + + for _, m := range ms { + metric := m + metric.Name = "kube_pod_status_scheduled" + metric.LabelKeys = []string{"condition"} + f = append(f, metric) + } + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_info", + Type: metrics.MetricTypeGauge, + Help: "Information about a container in a pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + labelKeys := []string{"container", "image", "image_id", "container_id"} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_info", + LabelKeys: labelKeys, + LabelValues: []string{cs.Name, cs.Image, cs.ImageID, cs.ContainerID}, + Value: 1, + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_waiting", + Type: metrics.MetricTypeGauge, + Help: "Describes whether the container is currently in waiting state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_waiting", + LabelKeys: []string{"container"}, + LabelValues: []string{cs.Name}, + Value: boolFloat64(cs.State.Waiting != nil), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_waiting_reason", + Type: metrics.MetricTypeGauge, + Help: "Describes the reason the container is currently in waiting state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + for _, reason := range containerWaitingReasons { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_waiting_reason", + LabelKeys: []string{"container", "reason"}, + LabelValues: []string{cs.Name, reason}, + Value: boolFloat64(waitingReason(cs, reason)), + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_running", + Type: metrics.MetricTypeGauge, + Help: "Describes whether the container is currently in running state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_running", + LabelKeys: []string{"container"}, + LabelValues: 
[]string{cs.Name}, + Value: boolFloat64(cs.State.Running != nil), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_terminated", + Type: metrics.MetricTypeGauge, + Help: "Describes whether the container is currently in terminated state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_terminated", + LabelKeys: []string{"container"}, + LabelValues: []string{cs.Name}, + Value: boolFloat64(cs.State.Terminated != nil), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_terminated_reason", + Type: metrics.MetricTypeGauge, + Help: "Describes the reason the container is currently in terminated state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + for _, reason := range containerTerminatedReasons { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_terminated_reason", + LabelKeys: []string{"container", "reason"}, + LabelValues: []string{cs.Name, reason}, + Value: boolFloat64(terminationReason(cs, reason)), + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_last_terminated_reason", + Type: metrics.MetricTypeGauge, + Help: "Describes the last reason the container was in terminated state.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + for _, reason := range containerTerminatedReasons { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_last_terminated_reason", + LabelKeys: []string{"container", "reason"}, + LabelValues: []string{cs.Name, reason}, + Value: boolFloat64(lastTerminationReason(cs, reason)), + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_ready", + Type: metrics.MetricTypeGauge, + Help: "Describes whether the containers readiness check succeeded.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_ready", + LabelKeys: []string{"container"}, + LabelValues: []string{cs.Name}, + Value: boolFloat64(cs.Ready), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_status_restarts_total", + Type: metrics.MetricTypeCounter, + Help: "The number of container restarts per container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, cs := range p.Status.ContainerStatuses { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_status_restarts_total", + LabelKeys: []string{"container"}, + LabelValues: []string{cs.Name}, + Value: float64(cs.RestartCount), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_requests", + Type: metrics.MetricTypeGauge, + Help: "The number of requested request resource by a container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + req := c.Resources.Requests + + for resourceName, val := range req { + switch resourceName { + case v1.ResourceCPU: + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, 
sanitizeLabelName(string(resourceName)), string(constant.UnitCore)}, + Value: float64(val.MilliValue()) / 1000, + }) + case v1.ResourceStorage: + fallthrough + case v1.ResourceEphemeralStorage: + fallthrough + case v1.ResourceMemory: + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + Value: float64(val.Value()), + }) + default: + if helper.IsHugePageResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + Value: float64(val.Value()), + }) + } + if helper.IsAttachableVolumeResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + Value: float64(val.Value()), + }) + } + if helper.IsExtendedResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitInteger)}, + Value: float64(val.Value()), + }) + } + } + } + } + + for _, family := range f { + family.Name = "kube_pod_container_resource_requests" + family.LabelKeys = []string{"container", "node", "resource", "unit"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_limits", + Type: metrics.MetricTypeGauge, + Help: "The number of requested limit resource by a container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + lim := c.Resources.Limits + + for resourceName, val := range lim { + switch resourceName { + case v1.ResourceCPU: + f = append(f, &metrics.Metric{ + Value: float64(val.MilliValue()) / 1000, + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitCore)}, + }) + case v1.ResourceStorage: + fallthrough + case v1.ResourceEphemeralStorage: + fallthrough + case v1.ResourceMemory: + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + Value: float64(val.Value()), + }) + default: + if helper.IsHugePageResourceName(resourceName) { + f = append(f, &metrics.Metric{ + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + Value: float64(val.Value()), + }) + } + if helper.IsAttachableVolumeResourceName(resourceName) { + f = append(f, &metrics.Metric{ + Value: float64(val.Value()), + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)}, + }) + } + if helper.IsExtendedResourceName(resourceName) { + f = append(f, &metrics.Metric{ + Value: float64(val.Value()), + LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitInteger)}, + }) + } + } + } + } + + for _, family := range f { + family.Name = "kube_pod_container_resource_limits" + family.LabelKeys = []string{"container", "node", "resource", "unit"} + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_requests_cpu_cores", + Type: metrics.MetricTypeGauge, + Help: "The number of requested cpu cores by a container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + req := 
c.Resources.Requests + if cpu, ok := req[v1.ResourceCPU]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_resource_requests_cpu_cores", + LabelKeys: []string{"container", "node"}, + LabelValues: []string{c.Name, p.Spec.NodeName}, + Value: float64(cpu.MilliValue()) / 1000, + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_requests_memory_bytes", + Type: metrics.MetricTypeGauge, + Help: "The number of requested memory bytes by a container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + req := c.Resources.Requests + if mem, ok := req[v1.ResourceMemory]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_resource_requests_memory_bytes", + LabelKeys: []string{"container", "node"}, + LabelValues: []string{c.Name, p.Spec.NodeName}, + Value: float64(mem.Value()), + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_limits_cpu_cores", + Type: metrics.MetricTypeGauge, + Help: "The limit on cpu cores to be used by a container.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + lim := c.Resources.Limits + if cpu, ok := lim[v1.ResourceCPU]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_resource_limits_cpu_cores", + LabelKeys: []string{"container", "node"}, + LabelValues: []string{c.Name, p.Spec.NodeName}, + Value: float64(cpu.MilliValue()) / 1000, + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_container_resource_limits_memory_bytes", + Type: metrics.MetricTypeGauge, + Help: "The limit on memory to be used by a container in bytes.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, c := range p.Spec.Containers { + lim := c.Resources.Limits + + if mem, ok := lim[v1.ResourceMemory]; ok { + f = append(f, &metrics.Metric{ + Name: "kube_pod_container_resource_limits_memory_bytes", + LabelKeys: []string{"container", "node"}, + LabelValues: []string{c.Name, p.Spec.NodeName}, + Value: float64(mem.Value()), + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_spec_volumes_persistentvolumeclaims_info", + Type: metrics.MetricTypeGauge, + Help: "Information about persistentvolumeclaim volumes in a pod.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, v := range p.Spec.Volumes { + if v.PersistentVolumeClaim != nil { + f = append(f, &metrics.Metric{ + Name: "kube_pod_spec_volumes_persistentvolumeclaims_info", + LabelKeys: []string{"volume", "persistentvolumeclaim"}, + LabelValues: []string{v.Name, v.PersistentVolumeClaim.ClaimName}, + Value: 1, + }) + } + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_pod_spec_volumes_persistentvolumeclaims_readonly", + Type: metrics.MetricTypeGauge, + Help: "Describes whether a persistentvolumeclaim is mounted read only.", + GenerateFunc: wrapPodFunc(func(p *v1.Pod) metrics.Family { + f := metrics.Family{} + + for _, v := range p.Spec.Volumes { + if v.PersistentVolumeClaim != nil { + f = append(f, &metrics.Metric{ + Name: "kube_pod_spec_volumes_persistentvolumeclaims_readonly", + LabelKeys: []string{"volume", "persistentvolumeclaim"}, + LabelValues: []string{v.Name, v.PersistentVolumeClaim.ClaimName}, + Value: boolFloat64(v.PersistentVolumeClaim.ReadOnly), + }) + } + } + + return f + }), + }, + } 
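+
+	// For illustration, a rough sketch (not exercised here) of how these
+	// generators are consumed: they are composed into a single generate
+	// function and applied to each pod object, which is exactly what the
+	// tests below do through composeMetricGenFuncs:
+	//
+	//	genFunc := composeMetricGenFuncs(podMetricFamilies)
+	//	family := genFunc(pod) // pod is a *v1.Pod
+	//	for _, m := range family {
+	//		fmt.Printf("%s%v %v\n", m.Name, m.LabelValues, m.Value)
+	//	}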
+)
+
+func wrapPodFunc(f func(*v1.Pod) metrics.Family) func(interface{}) metrics.Family {
+	return func(obj interface{}) metrics.Family {
+		pod := obj.(*v1.Pod)
+
+		metricFamily := f(pod)
+
+		for _, m := range metricFamily {
+			m.LabelKeys = append(descPodLabelsDefaultLabels, m.LabelKeys...)
+			m.LabelValues = append([]string{pod.Namespace, pod.Name}, m.LabelValues...)
+		}
+
+		return metricFamily
+	}
+}
+
+func createPodListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch {
+	return cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			return kubeClient.CoreV1().Pods(ns).List(opts)
+		},
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			return kubeClient.CoreV1().Pods(ns).Watch(opts)
+		},
+	}
+}
+
+func waitingReason(cs v1.ContainerStatus, reason string) bool {
+	if cs.State.Waiting == nil {
+		return false
+	}
+	return cs.State.Waiting.Reason == reason
+}
+
+func terminationReason(cs v1.ContainerStatus, reason string) bool {
+	if cs.State.Terminated == nil {
+		return false
+	}
+	return cs.State.Terminated.Reason == reason
+}
+
+func lastTerminationReason(cs v1.ContainerStatus, reason string) bool {
+	if cs.LastTerminationState.Terminated == nil {
+		return false
+	}
+	return cs.LastTerminationState.Terminated.Reason == reason
+}
diff --git a/pkg/collectors/pod_test.go b/pkg/collectors/pod_test.go
new file mode 100644
index 0000000000..7d7117e2d0
--- /dev/null
+++ b/pkg/collectors/pod_test.go
@@ -0,0 +1,1064 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package collectors
+
+import (
+	"testing"
+	"time"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/util/node"
+)
+
+func TestPodCollector(t *testing.T) {
+	// Fixed metadata on type and help text. We prepend this to every expected
+	// output so we only have to modify a single place when doing adjustments.
+	var test = true
+
+	startTime := 1501569018
+	metav1StartTime := metav1.Unix(int64(startTime), 0)
+
+	// TODO: re-enable metadata
+	const metadata = ""
+	// const metadata = `
+	// # HELP kube_pod_created Unix creation timestamp
+	// # TYPE kube_pod_created gauge
+	// # HELP kube_pod_container_info Information about a container in a pod.
+	// # TYPE kube_pod_container_info gauge
+	// # HELP kube_pod_labels Kubernetes labels converted to Prometheus labels.
+	// # TYPE kube_pod_labels gauge
+	// # HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded.
+	// # TYPE kube_pod_container_status_ready gauge
+	// # HELP kube_pod_container_status_restarts_total The number of container restarts per container.
+	// # TYPE kube_pod_container_status_restarts_total counter
+	// # HELP kube_pod_container_status_running Describes whether the container is currently in running state.
+ // # TYPE kube_pod_container_status_running gauge + // # HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state. + // # TYPE kube_pod_container_status_terminated gauge + // # HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state. + // # TYPE kube_pod_container_status_terminated_reason gauge + // # HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state. + // # TYPE kube_pod_container_status_last_terminated_reason gauge + // # HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state. + // # TYPE kube_pod_container_status_waiting gauge + // # HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state. + // # TYPE kube_pod_container_status_waiting_reason gauge + // # HELP kube_pod_info Information about pod. + // # TYPE kube_pod_info gauge + // # HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status + // # TYPE kube_pod_status_scheduled_time gauge + // # HELP kube_pod_start_time Start time in unix timestamp for a pod. + // # TYPE kube_pod_start_time gauge + // # HELP kube_pod_completion_time Completion time in unix timestamp for a pod. + // # TYPE kube_pod_completion_time gauge + // # HELP kube_pod_owner Information about the Pod's owner. + // # TYPE kube_pod_owner gauge + // # HELP kube_pod_status_phase The pods current phase. + // # TYPE kube_pod_status_phase gauge + // # HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. + // # TYPE kube_pod_status_ready gauge + // # HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. + // # TYPE kube_pod_status_scheduled gauge + // # HELP kube_pod_container_resource_requests The number of requested request resource by a container. + // # TYPE kube_pod_container_resource_requests gauge + // # HELP kube_pod_container_resource_limits The number of requested limit resource by a container. + // # TYPE kube_pod_container_resource_limits gauge + // # HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container. + // # TYPE kube_pod_container_resource_requests_cpu_cores gauge + // # HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container. + // # TYPE kube_pod_container_resource_requests_memory_bytes gauge + // # HELP kube_pod_container_resource_limits_cpu_cores The limit on cpu cores to be used by a container. + // # TYPE kube_pod_container_resource_limits_cpu_cores gauge + // # HELP kube_pod_container_resource_limits_memory_bytes The limit on memory to be used by a container in bytes. + // # TYPE kube_pod_container_resource_limits_memory_bytes gauge + // # HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod. + // # TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge + // # HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only. 
+ // # TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge + // ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container1", + Image: "k8s.gcr.io/hyperkube1", + ImageID: "docker://sha256:aaa", + ContainerID: "docker://ab123", + }, + }, + }, + }, + Want: `kube_pod_container_info{container="container1",container_id="docker://ab123",image="k8s.gcr.io/hyperkube1",image_id="docker://sha256:aaa",namespace="ns1",pod="pod1"} 1`, + MetricNames: []string{"kube_pod_container_info"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2", + Image: "k8s.gcr.io/hyperkube2", + ImageID: "docker://sha256:bbb", + ContainerID: "docker://cd456", + }, + v1.ContainerStatus{ + Name: "container3", + Image: "k8s.gcr.io/hyperkube3", + ImageID: "docker://sha256:ccc", + ContainerID: "docker://ef789", + }, + }, + }, + }, + Want: `kube_pod_container_info{container="container2",container_id="docker://cd456",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",namespace="ns2",pod="pod2"} 1 + kube_pod_container_info{container="container3",container_id="docker://ef789",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",namespace="ns2",pod="pod2"} 1`, + MetricNames: []string{"kube_pod_container_info"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container1", + Ready: true, + }, + }, + }, + }, + Want: `kube_pod_container_status_ready{container="container1",namespace="ns1",pod="pod1"} 1`, + MetricNames: []string{"kube_pod_container_status_ready"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2", + Ready: true, + }, + v1.ContainerStatus{ + Name: "container3", + Ready: false, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_container_status_ready{container="container2",namespace="ns2",pod="pod2"} 1 + kube_pod_container_status_ready{container="container3",namespace="ns2",pod="pod2"} 0 + `, + MetricNames: []string{"kube_pod_container_status_ready"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container1", + RestartCount: 0, + }, + }, + }, + }, + Want: `kube_pod_container_status_restarts_total{container="container1",namespace="ns1",pod="pod1"} 0`, + MetricNames: []string{"kube_pod_container_status_restarts_total"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2", + RestartCount: 0, + }, + v1.ContainerStatus{ + Name: "container3", + RestartCount: 1, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_container_status_restarts_total{container="container2",namespace="ns2",pod="pod2"} 0 + kube_pod_container_status_restarts_total{container="container3",namespace="ns2",pod="pod2"} 1 + `, + MetricNames: []string{"kube_pod_container_status_restarts_total"}, 
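+			// cs.RestartCount feeds the counter value directly, so containers
+			// that never restarted still emit an explicit 0 sample.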
+ }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container1",namespace="ns1",pod="pod1"} 1 + kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container1",namespace="ns1",pod="pod1",reason="OOMKilled"} 0 + kube_pod_container_status_terminated{container="container1",namespace="ns1",pod="pod1"} 0 + kube_pod_container_status_waiting{container="container1",namespace="ns1",pod="pod1"} 0 + kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container1",namespace="ns1",pod="pod1",reason="CreateContainerConfigError"} 0 + +`, + + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "OOMKilled", + }, + }, + }, + v1.ContainerStatus{ + Name: "container3", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ContainerCreating", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container2",namespace="ns2",pod="pod2"} 0 + kube_pod_container_status_running{container="container3",namespace="ns2",pod="pod2"} 0 + kube_pod_container_status_terminated{container="container2",namespace="ns2",pod="pod2"} 1 + kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container2",namespace="ns2",pod="pod2",reason="OOMKilled"} 1 + kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="Error"} 0 + 
kube_pod_container_status_terminated_reason{container="container3",namespace="ns2",pod="pod2",reason="OOMKilled"} 0 + kube_pod_container_status_waiting{container="container2",namespace="ns2",pod="pod2"} 0 + kube_pod_container_status_waiting{container="container3",namespace="ns2",pod="pod2"} 1 + kube_pod_container_status_terminated{container="container3",namespace="ns2",pod="pod2"} 0 + kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container2",namespace="ns2",pod="pod2",reason="CreateContainerConfigError"} 0 + kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ContainerCreating"} 1 + kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container3",namespace="ns2",pod="pod2",reason="CreateContainerConfigError"} 0 + +`, + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod3", + Namespace: "ns3", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container4", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CrashLoopBackOff", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container4",namespace="ns3",pod="pod3"} 0 + kube_pod_container_status_terminated{container="container4",namespace="ns3",pod="pod3"} 0 +kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="OOMKilled"} 0 + kube_pod_container_status_waiting{container="container4",namespace="ns3",pod="pod3"} 1 +kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="CrashLoopBackOff"} 1 + kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="ErrImagePull"} 0 + 
kube_pod_container_status_waiting_reason{container="container4",namespace="ns3",pod="pod3",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_last_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Completed"} 0 + kube_pod_container_status_last_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="ContainerCannotRun"} 0 + kube_pod_container_status_last_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="Error"} 0 + kube_pod_container_status_last_terminated_reason{container="container4",namespace="ns3",pod="pod3",reason="OOMKilled"} 0 +`, + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_last_terminated_reason", + "kube_pod_container_status_last_terminated_reason", + "kube_pod_container_status_last_terminated_reason", + "kube_pod_container_status_last_terminated_reason", + }, + }, + { + + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod6", + Namespace: "ns6", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container7", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "OOMKilled", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container7",namespace="ns6",pod="pod6"} 1 + kube_pod_container_status_terminated{container="container7",namespace="ns6",pod="pod6"} 0 +kube_pod_container_status_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="OOMKilled"} 0 + kube_pod_container_status_waiting{container="container7",namespace="ns6",pod="pod6"} 0 +kube_pod_container_status_waiting_reason{container="container7",namespace="ns6",pod="pod6",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container7",namespace="ns6",pod="pod6",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container7",namespace="ns6",pod="pod6",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container7",namespace="ns6",pod="pod6",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container7",namespace="ns6",pod="pod6",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_last_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="Completed"} 0 + kube_pod_container_status_last_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="ContainerCannotRun"} 0 + 
kube_pod_container_status_last_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="Error"} 0 + kube_pod_container_status_last_terminated_reason{container="container7",namespace="ns6",pod="pod6",reason="OOMKilled"} 1 +`, + MetricNames: []string{ + "kube_pod_container_status_last_terminated_reason", + "kube_pod_container_status_running", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "ns4", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container5", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container5",namespace="ns4",pod="pod4"} 0 + kube_pod_container_status_terminated{container="container5",namespace="ns4",pod="pod4"} 0 + kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container5",namespace="ns4",pod="pod4",reason="OOMKilled"} 0 + kube_pod_container_status_waiting{container="container5",namespace="ns4",pod="pod4"} 1 + kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ImagePullBackOff"} 1 + kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container5",namespace="ns4",pod="pod4",reason="CreateContainerConfigError"} 0 +`, + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod5", + Namespace: "ns5", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container6", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ErrImagePull", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container6",namespace="ns5",pod="pod5"} 0 + kube_pod_container_status_terminated{container="container6",namespace="ns5",pod="pod5"} 0 + kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container6",namespace="ns5",pod="pod5",reason="OOMKilled"} 0 + 
kube_pod_container_status_waiting{container="container6",namespace="ns5",pod="pod5"} 1 + kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="ErrImagePull"} 1 + kube_pod_container_status_waiting_reason{container="container6",namespace="ns5",pod="pod5",reason="CreateContainerConfigError"} 0 +`, + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod7", + Namespace: "ns7", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container8", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CreateContainerConfigError", + }, + }, + }, + }, + }, + }, + Want: ` + kube_pod_container_status_running{container="container8",namespace="ns7",pod="pod7"} 0 + kube_pod_container_status_terminated{container="container8",namespace="ns7",pod="pod7"} 0 + kube_pod_container_status_terminated_reason{container="container8",namespace="ns7",pod="pod7",reason="Completed"} 0 + kube_pod_container_status_terminated_reason{container="container8",namespace="ns7",pod="pod7",reason="ContainerCannotRun"} 0 + kube_pod_container_status_terminated_reason{container="container8",namespace="ns7",pod="pod7",reason="Error"} 0 + kube_pod_container_status_terminated_reason{container="container8",namespace="ns7",pod="pod7",reason="OOMKilled"} 0 + kube_pod_container_status_waiting{container="container8",namespace="ns7",pod="pod7"} 1 + kube_pod_container_status_waiting_reason{container="container8",namespace="ns7",pod="pod7",reason="ContainerCreating"} 0 + kube_pod_container_status_waiting_reason{container="container8",namespace="ns7",pod="pod7",reason="ImagePullBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container8",namespace="ns7",pod="pod7",reason="CrashLoopBackOff"} 0 + kube_pod_container_status_waiting_reason{container="container8",namespace="ns7",pod="pod7",reason="ErrImagePull"} 0 + kube_pod_container_status_waiting_reason{container="container8",namespace="ns7",pod="pod7",reason="CreateContainerConfigError"} 1 +`, + MetricNames: []string{ + "kube_pod_container_status_running", + "kube_pod_container_status_terminated", + "kube_pod_container_status_terminated_reason", + "kube_pod_container_status_waiting", + "kube_pod_container_status_waiting_reason", + }, + }, + { + + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + UID: "abc-123-xxx", + }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, + Status: v1.PodStatus{ + HostIP: "1.1.1.1", + PodIP: "1.2.3.4", + StartTime: &metav1StartTime, + }, + }, + // TODO: Should it be '1501569018' instead? 
+ Want: ` + kube_pod_created{namespace="ns1",pod="pod1"} 1.5e+09 + kube_pod_info{created_by_kind="",created_by_name="",host_ip="1.1.1.1",namespace="ns1",node="node1",pod="pod1",pod_ip="1.2.3.4",uid="abc-123-xxx"} 1 + kube_pod_start_time{namespace="ns1",pod="pod1"} 1.501569018e+09 + kube_pod_owner{namespace="ns1",owner_is_controller="",owner_kind="",owner_name="",pod="pod1"} 1 +`, + MetricNames: []string{"kube_pod_created", "kube_pod_info", "kube_pod_start_time", "kube_pod_completion_time", "kube_pod_owner"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + UID: "abc-456-xxx", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "rs-name", + Controller: &test, + }, + }, + }, + Spec: v1.PodSpec{ + NodeName: "node2", + }, + Status: v1.PodStatus{ + HostIP: "1.1.1.1", + PodIP: "2.3.4.5", + ContainerStatuses: []v1.ContainerStatus{ + v1.ContainerStatus{ + Name: "container2_1", + Image: "k8s.gcr.io/hyperkube2", + ImageID: "docker://sha256:bbb", + ContainerID: "docker://cd456", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + FinishedAt: metav1.Time{ + Time: time.Unix(1501777018, 0), + }, + }, + }, + }, + v1.ContainerStatus{ + Name: "container2_2", + Image: "k8s.gcr.io/hyperkube2", + ImageID: "docker://sha256:bbb", + ContainerID: "docker://cd456", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + FinishedAt: metav1.Time{ + Time: time.Unix(1501888018, 0), + }, + }, + }, + }, + v1.ContainerStatus{ + Name: "container2_3", + Image: "k8s.gcr.io/hyperkube2", + ImageID: "docker://sha256:bbb", + ContainerID: "docker://cd456", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + FinishedAt: metav1.Time{ + Time: time.Unix(1501666018, 0), + }, + }, + }, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_info{created_by_kind="ReplicaSet",created_by_name="rs-name",host_ip="1.1.1.1",namespace="ns2",node="node2",pod="pod2",pod_ip="2.3.4.5",uid="abc-456-xxx"} 1 + kube_pod_completion_time{namespace="ns2",pod="pod2"} 1.501888018e+09 + kube_pod_owner{namespace="ns2",owner_is_controller="true",owner_kind="ReplicaSet",owner_name="rs-name",pod="pod2"} 1 + `, + MetricNames: []string{"kube_pod_created", "kube_pod_info", "kube_pod_start_time", "kube_pod_completion_time", "kube_pod_owner"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + }, + Want: ` + kube_pod_status_phase{namespace="ns1",phase="Failed",pod="pod1"} 0 + kube_pod_status_phase{namespace="ns1",phase="Pending",pod="pod1"} 0 + kube_pod_status_phase{namespace="ns1",phase="Running",pod="pod1"} 1 + kube_pod_status_phase{namespace="ns1",phase="Succeeded",pod="pod1"} 0 + kube_pod_status_phase{namespace="ns1",phase="Unknown",pod="pod1"} 0 +`, + MetricNames: []string{"kube_pod_status_phase"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + }, + }, + Want: ` + kube_pod_status_phase{namespace="ns2",phase="Failed",pod="pod2"} 0 + kube_pod_status_phase{namespace="ns2",phase="Pending",pod="pod2"} 1 + kube_pod_status_phase{namespace="ns2",phase="Running",pod="pod2"} 0 + kube_pod_status_phase{namespace="ns2",phase="Succeeded",pod="pod2"} 0 + kube_pod_status_phase{namespace="ns2",phase="Unknown",pod="pod2"} 0 +`, + MetricNames: []string{"kube_pod_status_phase"}, + }, + { + + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"pod3", + Namespace: "ns3", + }, + Status: v1.PodStatus{ + Phase: v1.PodUnknown, + }, + }, + Want: ` + kube_pod_status_phase{namespace="ns3",phase="Failed",pod="pod3"} 0 + kube_pod_status_phase{namespace="ns3",phase="Pending",pod="pod3"} 0 + kube_pod_status_phase{namespace="ns3",phase="Running",pod="pod3"} 0 + kube_pod_status_phase{namespace="ns3",phase="Succeeded",pod="pod3"} 0 + kube_pod_status_phase{namespace="ns3",phase="Unknown",pod="pod3"} 1 +`, + MetricNames: []string{"kube_pod_status_phase"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod4", + Namespace: "ns4", + DeletionTimestamp: &metav1.Time{}, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + Reason: node.NodeUnreachablePodReason, + }, + }, + Want: ` + kube_pod_status_phase{namespace="ns4",phase="Failed",pod="pod4"} 0 + kube_pod_status_phase{namespace="ns4",phase="Pending",pod="pod4"} 0 + kube_pod_status_phase{namespace="ns4",phase="Running",pod="pod4"} 0 + kube_pod_status_phase{namespace="ns4",phase="Succeeded",pod="pod4"} 0 + kube_pod_status_phase{namespace="ns4",phase="Unknown",pod="pod4"} 1 +`, + MetricNames: []string{"kube_pod_status_phase"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_status_ready{condition="false",namespace="ns1",pod="pod1"} 0 + kube_pod_status_ready{condition="true",namespace="ns1",pod="pod1"} 1 + kube_pod_status_ready{condition="unknown",namespace="ns1",pod="pod1"} 0 + `, + MetricNames: []string{"kube_pod_status_ready"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_status_ready{condition="false",namespace="ns2",pod="pod2"} 1 + kube_pod_status_ready{condition="true",namespace="ns2",pod="pod2"} 0 + kube_pod_status_ready{condition="unknown",namespace="ns2",pod="pod2"} 0 + `, + MetricNames: []string{"kube_pod_status_ready"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + LastTransitionTime: metav1.Time{ + Time: time.Unix(1501666018, 0), + }, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_status_scheduled_time{namespace="ns1",pod="pod1"} 1.501666018e+09 + kube_pod_status_scheduled{condition="false",namespace="ns1",pod="pod1"} 0 + kube_pod_status_scheduled{condition="true",namespace="ns1",pod="pod1"} 1 + kube_pod_status_scheduled{condition="unknown",namespace="ns1",pod="pod1"} 0 + `, + MetricNames: []string{"kube_pod_status_scheduled", "kube_pod_status_scheduled_time"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Status: v1.PodStatus{ + Conditions: []v1.PodCondition{ + v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_status_scheduled{condition="false",namespace="ns2",pod="pod2"} 1 + kube_pod_status_scheduled{condition="true",namespace="ns2",pod="pod2"} 0 + kube_pod_status_scheduled{condition="unknown",namespace="ns2",pod="pod2"} 0 + `, + MetricNames: []string{"kube_pod_status_scheduled", 
"kube_pod_status_scheduled_time"}, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + }, + Spec: v1.PodSpec{ + NodeName: "node1", + Containers: []v1.Container{ + v1.Container{ + Name: "pod1_con1", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceEphemeralStorage: resource.MustParse("300M"), + v1.ResourceStorage: resource.MustParse("400M"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("200m"), + v1.ResourceMemory: resource.MustParse("100M"), + v1.ResourceEphemeralStorage: resource.MustParse("300M"), + v1.ResourceStorage: resource.MustParse("400M"), + v1.ResourceName("nvidia.com/gpu"): resource.MustParse("1"), + }, + }, + }, + v1.Container{ + Name: "pod1_con2", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("300m"), + v1.ResourceMemory: resource.MustParse("200M"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("300m"), + v1.ResourceMemory: resource.MustParse("200M"), + }, + }, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_container_resource_requests_cpu_cores{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 0.2 + kube_pod_container_resource_requests_cpu_cores{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 0.3 + kube_pod_container_resource_requests_memory_bytes{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 1e+08 + kube_pod_container_resource_requests_memory_bytes{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2e+08 + kube_pod_container_resource_limits_cpu_cores{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 0.2 + kube_pod_container_resource_limits_cpu_cores{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 0.3 + kube_pod_container_resource_limits_memory_bytes{container="pod1_con1",namespace="ns1",node="node1",pod="pod1"} 1e+08 + kube_pod_container_resource_limits_memory_bytes{container="pod1_con2",namespace="ns1",node="node1",pod="pod1"} 2e+08 + kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="cpu",unit="core"} 0.2 + kube_pod_container_resource_requests{container="pod1_con2",namespace="ns1",node="node1",pod="pod1",resource="cpu",unit="core"} 0.3 + kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1 + kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="memory",unit="byte"} 1e+08 + kube_pod_container_resource_requests{container="pod1_con2",namespace="ns1",node="node1",pod="pod1",resource="memory",unit="byte"} 2e+08 + kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="storage",unit="byte"} 4e+08 + kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="ephemeral_storage",unit="byte"} 3e+08 + kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="cpu",unit="core"} 0.2 + kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1 + 
kube_pod_container_resource_limits{container="pod1_con2",namespace="ns1",node="node1",pod="pod1",resource="cpu",unit="core"} 0.3 + kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="memory",unit="byte"} 1e+08 + kube_pod_container_resource_limits{container="pod1_con2",namespace="ns1",node="node1",pod="pod1",resource="memory",unit="byte"} 2e+08 + kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="storage",unit="byte"} 4e+08 + kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",node="node1",pod="pod1",resource="ephemeral_storage",unit="byte"} 3e+08 + `, + MetricNames: []string{ + "kube_pod_container_resource_requests_cpu_cores", + "kube_pod_container_resource_requests_memory_bytes", + "kube_pod_container_resource_limits_cpu_cores", + "kube_pod_container_resource_limits_memory_bytes", + "kube_pod_container_resource_requests", + "kube_pod_container_resource_limits", + }, + }, + { + + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "ns2", + }, + Spec: v1.PodSpec{ + NodeName: "node2", + Containers: []v1.Container{ + v1.Container{ + Name: "pod2_con1", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("400m"), + v1.ResourceMemory: resource.MustParse("300M"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("400m"), + v1.ResourceMemory: resource.MustParse("300M"), + }, + }, + }, + v1.Container{ + Name: "pod2_con2", + Resources: v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("500m"), + v1.ResourceMemory: resource.MustParse("400M"), + }, + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: resource.MustParse("500m"), + v1.ResourceMemory: resource.MustParse("400M"), + }, + }, + }, + // A container without a resource specicication. No metrics will be emitted for that. 
+ v1.Container{ + Name: "pod2_con3", + }, + }, + }, + }, + Want: metadata + ` + kube_pod_container_resource_requests_cpu_cores{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 0.4 + kube_pod_container_resource_requests_cpu_cores{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 0.5 + kube_pod_container_resource_requests_memory_bytes{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3e+08 + kube_pod_container_resource_requests_memory_bytes{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 4e+08 + kube_pod_container_resource_limits_cpu_cores{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 0.4 + kube_pod_container_resource_limits_cpu_cores{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 0.5 + kube_pod_container_resource_limits_memory_bytes{container="pod2_con1",namespace="ns2",node="node2",pod="pod2"} 3e+08 + kube_pod_container_resource_limits_memory_bytes{container="pod2_con2",namespace="ns2",node="node2",pod="pod2"} 4e+08 + kube_pod_container_resource_requests{container="pod2_con1",namespace="ns2",node="node2",pod="pod2",resource="cpu",unit="core"} 0.4 + kube_pod_container_resource_requests{container="pod2_con2",namespace="ns2",node="node2",pod="pod2",resource="cpu",unit="core"} 0.5 + kube_pod_container_resource_requests{container="pod2_con1",namespace="ns2",node="node2",pod="pod2",resource="memory",unit="byte"} 3e+08 + kube_pod_container_resource_requests{container="pod2_con2",namespace="ns2",node="node2",pod="pod2",resource="memory",unit="byte"} 4e+08 + kube_pod_container_resource_limits{container="pod2_con1",namespace="ns2",node="node2",pod="pod2",resource="cpu",unit="core"} 0.4 + kube_pod_container_resource_limits{container="pod2_con2",namespace="ns2",node="node2",pod="pod2",resource="cpu",unit="core"} 0.5 + kube_pod_container_resource_limits{container="pod2_con1",namespace="ns2",node="node2",pod="pod2",resource="memory",unit="byte"} 3e+08 + kube_pod_container_resource_limits{container="pod2_con2",namespace="ns2",node="node2",pod="pod2",resource="memory",unit="byte"} 4e+08 + `, + MetricNames: []string{ + "kube_pod_container_resource_requests_cpu_cores", + "kube_pod_container_resource_requests_cpu_cores", + "kube_pod_container_resource_requests_memory_bytes", + "kube_pod_container_resource_requests_memory_bytes", + "kube_pod_container_resource_limits_cpu_cores", + "kube_pod_container_resource_limits_cpu_cores", + "kube_pod_container_resource_limits_memory_bytes", + "kube_pod_container_resource_limits_memory_bytes", + "kube_pod_container_resource_requests", + "kube_pod_container_resource_requests", + "kube_pod_container_resource_requests", + "kube_pod_container_resource_requests", + "kube_pod_container_resource_limits", + "kube_pod_container_resource_limits", + "kube_pod_container_resource_limits", + "kube_pod_container_resource_limits", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + Labels: map[string]string{ + "app": "example", + }, + }, + Spec: v1.PodSpec{}, + }, + Want: metadata + ` + kube_pod_labels{label_app="example",namespace="ns1",pod="pod1"} 1 + `, + MetricNames: []string{ + "kube_pod_labels", + }, + }, + { + Obj: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "ns1", + Labels: map[string]string{ + "app": "example", + }, + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + v1.Volume{ + Name: "myvol", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "claim1", + 
ReadOnly: false, + }, + }, + }, + v1.Volume{ + Name: "my-readonly-vol", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "claim2", + ReadOnly: true, + }, + }, + }, + v1.Volume{ + Name: "not-pvc-vol", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{ + Medium: "memory", + }, + }, + }, + }, + }, + }, + Want: metadata + ` + kube_pod_spec_volumes_persistentvolumeclaims_info{namespace="ns1",persistentvolumeclaim="claim1",pod="pod1",volume="myvol"} 1 + kube_pod_spec_volumes_persistentvolumeclaims_info{namespace="ns1",persistentvolumeclaim="claim2",pod="pod1",volume="my-readonly-vol"} 1 + kube_pod_spec_volumes_persistentvolumeclaims_readonly{namespace="ns1",persistentvolumeclaim="claim1",pod="pod1",volume="myvol"} 0 + kube_pod_spec_volumes_persistentvolumeclaims_readonly{namespace="ns1",persistentvolumeclaim="claim2",pod="pod1",volume="my-readonly-vol"} 1 + + `, + MetricNames: []string{ + "kube_pod_spec_volumes_persistentvolumeclaims_info", + "kube_pod_spec_volumes_persistentvolumeclaims_readonly", + }, + }, + } + + for i, c := range cases { + c.Func = composeMetricGenFuncs(podMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/poddisruptionbudget.go b/pkg/collectors/poddisruptionbudget.go new file mode 100644 index 0000000000..3575b3191e --- /dev/null +++ b/pkg/collectors/poddisruptionbudget.go @@ -0,0 +1,133 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/kube-state-metrics/pkg/metrics" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" +) + +var ( + descPodDisruptionBudgetLabelsDefaultLabels = []string{"namespace", "poddisruptionbudget"} + + podDisruptionBudgetMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + f := metrics.Family{} + + if !p.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_poddisruptionbudget_created", + Value: float64(p.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_status_current_healthy", + Type: metrics.MetricTypeGauge, + Help: "Current number of healthy pods", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_poddisruptionbudget_status_current_healthy", + Value: float64(p.Status.CurrentHealthy), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_status_desired_healthy", + Type: metrics.MetricTypeGauge, + Help: "Minimum desired number of healthy pods", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_poddisruptionbudget_status_desired_healthy", + Value: float64(p.Status.DesiredHealthy), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_status_pod_disruptions_allowed", + Type: metrics.MetricTypeGauge, + Help: "Number of pod disruptions that are currently allowed", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_poddisruptionbudget_status_pod_disruptions_allowed", + Value: float64(p.Status.PodDisruptionsAllowed), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_status_expected_pods", + Type: metrics.MetricTypeGauge, + Help: "Total number of pods counted by this disruption budget", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_poddisruptionbudget_status_expected_pods", + Value: float64(p.Status.ExpectedPods), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_poddisruptionbudget_status_observed_generation", + Type: metrics.MetricTypeGauge, + Help: "Most recent generation observed when updating this PDB status", + GenerateFunc: wrapPodDisruptionBudgetFunc(func(p *v1beta1.PodDisruptionBudget) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_poddisruptionbudget_status_observed_generation", + Value: float64(p.Status.ObservedGeneration), + }} + }), + }, + } +) + +func wrapPodDisruptionBudgetFunc(f func(*v1beta1.PodDisruptionBudget) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + podDisruptionBudget := obj.(*v1beta1.PodDisruptionBudget) + + metricFamily := f(podDisruptionBudget) + + for _, m := range metricFamily { + m.LabelKeys = append(descPodDisruptionBudgetLabelsDefaultLabels, m.LabelKeys...) 
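+			// Prepend the label values that match the default "namespace" and
+			// "poddisruptionbudget" keys prepended just above, keeping
+			// key/value order aligned.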
+ m.LabelValues = append([]string{podDisruptionBudget.Namespace, podDisruptionBudget.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createPodDisruptionBudgetListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.PolicyV1beta1().PodDisruptionBudgets(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.PolicyV1beta1().PodDisruptionBudgets(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/poddisruptionbudget_test.go b/pkg/collectors/poddisruptionbudget_test.go new file mode 100644 index 0000000000..4651062c63 --- /dev/null +++ b/pkg/collectors/poddisruptionbudget_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPodDisruptionBudgetCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_poddisruptionbudget_created Unix creation timestamp + # TYPE kube_poddisruptionbudget_created gauge + # HELP kube_poddisruptionbudget_status_current_healthy Current number of healthy pods + # TYPE kube_poddisruptionbudget_status_current_healthy gauge + # HELP kube_poddisruptionbudget_status_desired_healthy Minimum desired number of healthy pods + # TYPE kube_poddisruptionbudget_status_desired_healthy gauge + # HELP kube_poddisruptionbudget_status_pod_disruptions_allowed Number of pod disruptions that are currently allowed + # TYPE kube_poddisruptionbudget_status_pod_disruptions_allowed gauge + # HELP kube_poddisruptionbudget_status_expected_pods Total number of pods counted by this disruption budget + # TYPE kube_poddisruptionbudget_status_expected_pods gauge + # HELP kube_poddisruptionbudget_status_observed_generation Most recent generation observed when updating this PDB status + # TYPE kube_poddisruptionbudget_status_observed_generation gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pdb1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Generation: 21, + }, + Status: v1beta1.PodDisruptionBudgetStatus{ + CurrentHealthy: 12, + DesiredHealthy: 10, + PodDisruptionsAllowed: 2, + ExpectedPods: 15, + ObservedGeneration: 111, + }, + }, + Want: ` + kube_poddisruptionbudget_created{namespace="ns1",poddisruptionbudget="pdb1"} 1.5e+09 + kube_poddisruptionbudget_status_current_healthy{namespace="ns1",poddisruptionbudget="pdb1"} 12 + kube_poddisruptionbudget_status_desired_healthy{namespace="ns1",poddisruptionbudget="pdb1"} 10 + 
kube_poddisruptionbudget_status_pod_disruptions_allowed{namespace="ns1",poddisruptionbudget="pdb1"} 2 + kube_poddisruptionbudget_status_expected_pods{namespace="ns1",poddisruptionbudget="pdb1"} 15 + kube_poddisruptionbudget_status_observed_generation{namespace="ns1",poddisruptionbudget="pdb1"} 111 + `, + }, + { + Obj: &v1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pdb2", + Namespace: "ns2", + Generation: 14, + }, + Status: v1beta1.PodDisruptionBudgetStatus{ + CurrentHealthy: 8, + DesiredHealthy: 9, + PodDisruptionsAllowed: 0, + ExpectedPods: 10, + ObservedGeneration: 1111, + }, + }, + Want: ` + kube_poddisruptionbudget_status_current_healthy{namespace="ns2",poddisruptionbudget="pdb2"} 8 + kube_poddisruptionbudget_status_desired_healthy{namespace="ns2",poddisruptionbudget="pdb2"} 9 + kube_poddisruptionbudget_status_pod_disruptions_allowed{namespace="ns2",poddisruptionbudget="pdb2"} 0 + kube_poddisruptionbudget_status_expected_pods{namespace="ns2",poddisruptionbudget="pdb2"} 10 + kube_poddisruptionbudget_status_observed_generation{namespace="ns2",poddisruptionbudget="pdb2"} 1111 + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(podDisruptionBudgetMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/replicaset.go b/pkg/collectors/replicaset.go new file mode 100644 index 0000000000..e7dbb64883 --- /dev/null +++ b/pkg/collectors/replicaset.go @@ -0,0 +1,187 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package collectors + +import ( + "strconv" + + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descReplicaSetLabelsDefaultLabels = []string{"namespace", "replicaset"} + + replicaSetMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_replicaset_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + f := metrics.Family{} + + if !r.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_replicaset_created", + Value: float64(r.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_status_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of replicas per ReplicaSet.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicaset_status_replicas", + Value: float64(r.Status.Replicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_status_fully_labeled_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of fully labeled replicas per ReplicaSet.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicaset_status_fully_labeled_replicas", + Value: float64(r.Status.FullyLabeledReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_status_ready_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of ready replicas per ReplicaSet.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicaset_status_ready_replicas", + Value: float64(r.Status.ReadyReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_status_observed_generation", + Type: metrics.MetricTypeGauge, + Help: "The generation observed by the ReplicaSet controller.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicaset_status_observed_generation", + Value: float64(r.Status.ObservedGeneration), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_spec_replicas", + Type: metrics.MetricTypeGauge, + Help: "Number of desired pods for a ReplicaSet.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + f := metrics.Family{} + + if r.Spec.Replicas != nil { + f = append(f, &metrics.Metric{ + Name: "kube_replicaset_spec_replicas", + Value: float64(*r.Spec.Replicas), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "Sequence number representing a specific generation of the desired state.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicaset_metadata_generation", + Value: float64(r.ObjectMeta.Generation), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicaset_owner", + Type: metrics.MetricTypeGauge, + Help: "Information about the ReplicaSet's owner.", + GenerateFunc: wrapReplicaSetFunc(func(r *v1beta1.ReplicaSet) 
metrics.Family { + f := metrics.Family{} + + owners := r.GetOwnerReferences() + if len(owners) == 0 { + f = append(f, &metrics.Metric{ + LabelValues: []string{"", "", ""}, + }) + } else { + for _, owner := range owners { + if owner.Controller != nil { + f = append(f, &metrics.Metric{ + LabelValues: []string{owner.Kind, owner.Name, strconv.FormatBool(*owner.Controller)}, + }) + } else { + f = append(f, &metrics.Metric{ + LabelValues: []string{owner.Kind, owner.Name, "false"}, + }) + } + } + } + + for _, m := range f { + m.Name = "kube_replicaset_owner" + m.LabelKeys = []string{"owner_kind", "owner_name", "owner_is_controller"} + m.Value = 1 + } + + return f + }), + }, + } +) + +func wrapReplicaSetFunc(f func(*v1beta1.ReplicaSet) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + replicaSet := obj.(*v1beta1.ReplicaSet) + + metricFamily := f(replicaSet) + + for _, m := range metricFamily { + m.LabelKeys = append(descReplicaSetLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{replicaSet.Namespace, replicaSet.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createReplicaSetListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.ExtensionsV1beta1().ReplicaSets(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.ExtensionsV1beta1().ReplicaSets(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/replicaset_test.go b/pkg/collectors/replicaset_test.go new file mode 100644 index 0000000000..1ef4976e22 --- /dev/null +++ b/pkg/collectors/replicaset_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + rs1Replicas int32 = 5 + rs2Replicas int32 = 0 +) + +func TestReplicaSetCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + var test = true + + const metadata = ` + # HELP kube_replicaset_created Unix creation timestamp + # TYPE kube_replicaset_created gauge + # HELP kube_replicaset_metadata_generation Sequence number representing a specific generation of the desired state. + # TYPE kube_replicaset_metadata_generation gauge + # HELP kube_replicaset_status_replicas The number of replicas per ReplicaSet. + # TYPE kube_replicaset_status_replicas gauge + # HELP kube_replicaset_status_fully_labeled_replicas The number of fully labeled replicas per ReplicaSet. + # TYPE kube_replicaset_status_fully_labeled_replicas gauge + # HELP kube_replicaset_status_ready_replicas The number of ready replicas per ReplicaSet. 
+ # TYPE kube_replicaset_status_ready_replicas gauge + # HELP kube_replicaset_status_observed_generation The generation observed by the ReplicaSet controller. + # TYPE kube_replicaset_status_observed_generation gauge + # HELP kube_replicaset_spec_replicas Number of desired pods for a ReplicaSet. + # TYPE kube_replicaset_spec_replicas gauge + # HELP kube_replicaset_owner Information about the ReplicaSet's owner. + # TYPE kube_replicaset_owner gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1beta1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Generation: 21, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: "dp-name", + Controller: &test, + }, + }, + }, + Status: v1beta1.ReplicaSetStatus{ + Replicas: 5, + FullyLabeledReplicas: 10, + ReadyReplicas: 5, + ObservedGeneration: 1, + }, + Spec: v1beta1.ReplicaSetSpec{ + Replicas: &rs1Replicas, + }, + }, + Want: ` + kube_replicaset_created{namespace="ns1",replicaset="rs1"} 1.5e+09 + kube_replicaset_metadata_generation{namespace="ns1",replicaset="rs1"} 21 + kube_replicaset_status_replicas{namespace="ns1",replicaset="rs1"} 5 + kube_replicaset_status_observed_generation{namespace="ns1",replicaset="rs1"} 1 + kube_replicaset_status_fully_labeled_replicas{namespace="ns1",replicaset="rs1"} 10 + kube_replicaset_status_ready_replicas{namespace="ns1",replicaset="rs1"} 5 + kube_replicaset_spec_replicas{namespace="ns1",replicaset="rs1"} 5 + kube_replicaset_owner{namespace="ns1",owner_is_controller="true",owner_kind="Deployment",owner_name="dp-name",replicaset="rs1"} 1 +`, + }, + { + Obj: &v1beta1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rs2", + Namespace: "ns2", + Generation: 14, + }, + Status: v1beta1.ReplicaSetStatus{ + Replicas: 0, + FullyLabeledReplicas: 5, + ReadyReplicas: 0, + ObservedGeneration: 5, + }, + Spec: v1beta1.ReplicaSetSpec{ + Replicas: &rs2Replicas, + }, + }, + Want: ` + kube_replicaset_metadata_generation{namespace="ns2",replicaset="rs2"} 14 + kube_replicaset_status_replicas{namespace="ns2",replicaset="rs2"} 0 + kube_replicaset_status_observed_generation{namespace="ns2",replicaset="rs2"} 5 + kube_replicaset_status_fully_labeled_replicas{namespace="ns2",replicaset="rs2"} 5 + kube_replicaset_status_ready_replicas{namespace="ns2",replicaset="rs2"} 0 + kube_replicaset_spec_replicas{namespace="ns2",replicaset="rs2"} 0 + kube_replicaset_owner{namespace="ns2",owner_is_controller="",owner_kind="",owner_name="",replicaset="rs2"} 1 + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(replicaSetMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + + } +} diff --git a/pkg/collectors/replicationcontroller.go b/pkg/collectors/replicationcontroller.go new file mode 100644 index 0000000000..87cbbb4185 --- /dev/null +++ b/pkg/collectors/replicationcontroller.go @@ -0,0 +1,161 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
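The `kube_replicaset_owner` generator above is the only one here whose label values are computed rather than copied from a single field: one `owner_kind`/`owner_name`/`owner_is_controller` triple per owner reference, with a single all-empty triple for ownerless ReplicaSets so the series still exists. Extracted into a standalone sketch (`ownerLabelValues` is an invented name for illustration):

```go
package example

import (
	"strconv"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownerLabelValues mirrors the generator's label logic: one
// [kind, name, is_controller] triple per owner reference, or a single
// all-empty triple when the object has no owners.
func ownerLabelValues(owners []metav1.OwnerReference) [][]string {
	if len(owners) == 0 {
		return [][]string{{"", "", ""}}
	}
	out := make([][]string, 0, len(owners))
	for _, o := range owners {
		isController := "false" // a nil Controller is reported as "false", as above
		if o.Controller != nil {
			isController = strconv.FormatBool(*o.Controller)
		}
		out = append(out, []string{o.Kind, o.Name, isController})
	}
	return out
}
```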
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descReplicationControllerLabelsDefaultLabels = []string{"namespace", "replicationcontroller"} + + replicationControllerMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + f := metrics.Family{} + + if !r.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_replicationcontroller_created", + Value: float64(r.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_status_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of replicas per ReplicationController.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_status_replicas", + Value: float64(r.Status.Replicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_status_fully_labeled_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of fully labeled replicas per ReplicationController.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_status_fully_labeled_replicas", + Value: float64(r.Status.FullyLabeledReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_status_ready_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of ready replicas per ReplicationController.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_status_ready_replicas", + Value: float64(r.Status.ReadyReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_status_available_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of available replicas per ReplicationController.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_status_available_replicas", + Value: float64(r.Status.AvailableReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_status_observed_generation", + Type: metrics.MetricTypeGauge, + Help: "The generation observed by the ReplicationController controller.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_status_observed_generation", + Value: float64(r.Status.ObservedGeneration), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_spec_replicas", + Type: metrics.MetricTypeGauge, + Help: "Number of desired pods for a ReplicationController.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) 
metrics.Family { + f := metrics.Family{} + + if r.Spec.Replicas != nil { + f = append(f, &metrics.Metric{ + Name: "kube_replicationcontroller_spec_replicas", + Value: float64(*r.Spec.Replicas), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_replicationcontroller_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "Sequence number representing a specific generation of the desired state.", + GenerateFunc: wrapReplicationControllerFunc(func(r *v1.ReplicationController) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_replicationcontroller_metadata_generation", + Value: float64(r.ObjectMeta.Generation), + }} + }), + }, + } +) + +func wrapReplicationControllerFunc(f func(*v1.ReplicationController) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + replicationController := obj.(*v1.ReplicationController) + + metricFamily := f(replicationController) + + for _, m := range metricFamily { + m.LabelKeys = append(descReplicationControllerLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{replicationController.Namespace, replicationController.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createReplicationControllerListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().ReplicationControllers(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().ReplicationControllers(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/replicationcontroller_test.go b/pkg/collectors/replicationcontroller_test.go new file mode 100644 index 0000000000..20e595f1d3 --- /dev/null +++ b/pkg/collectors/replicationcontroller_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + rc1Replicas int32 = 5 + rc2Replicas int32 = 0 +) + +func TestReplicationControllerCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_replicationcontroller_created Unix creation timestamp + # TYPE kube_replicationcontroller_created gauge + # HELP kube_replicationcontroller_metadata_generation Sequence number representing a specific generation of the desired state. + # TYPE kube_replicationcontroller_metadata_generation gauge + # HELP kube_replicationcontroller_status_replicas The number of replicas per ReplicationController. + # TYPE kube_replicationcontroller_status_replicas gauge + # HELP kube_replicationcontroller_status_fully_labeled_replicas The number of fully labeled replicas per ReplicationController. 
+ # TYPE kube_replicationcontroller_status_fully_labeled_replicas gauge + # HELP kube_replicationcontroller_status_available_replicas The number of available replicas per ReplicationController. + # TYPE kube_replicationcontroller_status_available_replicas gauge + # HELP kube_replicationcontroller_status_ready_replicas The number of ready replicas per ReplicationController. + # TYPE kube_replicationcontroller_status_ready_replicas gauge + # HELP kube_replicationcontroller_status_observed_generation The generation observed by the ReplicationController controller. + # TYPE kube_replicationcontroller_status_observed_generation gauge + # HELP kube_replicationcontroller_spec_replicas Number of desired pods for a ReplicationController. + # TYPE kube_replicationcontroller_spec_replicas gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rc1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Generation: 21, + }, + Status: v1.ReplicationControllerStatus{ + Replicas: 5, + FullyLabeledReplicas: 10, + ReadyReplicas: 5, + AvailableReplicas: 3, + ObservedGeneration: 1, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: &rc1Replicas, + }, + }, + Want: ` + kube_replicationcontroller_created{namespace="ns1",replicationcontroller="rc1"} 1.5e+09 + kube_replicationcontroller_metadata_generation{namespace="ns1",replicationcontroller="rc1"} 21 + kube_replicationcontroller_status_replicas{namespace="ns1",replicationcontroller="rc1"} 5 + kube_replicationcontroller_status_observed_generation{namespace="ns1",replicationcontroller="rc1"} 1 + kube_replicationcontroller_status_fully_labeled_replicas{namespace="ns1",replicationcontroller="rc1"} 10 + kube_replicationcontroller_status_ready_replicas{namespace="ns1",replicationcontroller="rc1"} 5 + kube_replicationcontroller_status_available_replicas{namespace="ns1",replicationcontroller="rc1"} 3 + kube_replicationcontroller_spec_replicas{namespace="ns1",replicationcontroller="rc1"} 5 +`, + }, + { + Obj: &v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rc2", + Namespace: "ns2", + Generation: 14, + }, + Status: v1.ReplicationControllerStatus{ + Replicas: 0, + FullyLabeledReplicas: 5, + ReadyReplicas: 0, + AvailableReplicas: 0, + ObservedGeneration: 5, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: &rc2Replicas, + }, + }, + Want: ` + kube_replicationcontroller_metadata_generation{namespace="ns2",replicationcontroller="rc2"} 14 + kube_replicationcontroller_status_replicas{namespace="ns2",replicationcontroller="rc2"} 0 + kube_replicationcontroller_status_observed_generation{namespace="ns2",replicationcontroller="rc2"} 5 + kube_replicationcontroller_status_fully_labeled_replicas{namespace="ns2",replicationcontroller="rc2"} 5 + kube_replicationcontroller_status_ready_replicas{namespace="ns2",replicationcontroller="rc2"} 0 + kube_replicationcontroller_status_available_replicas{namespace="ns2",replicationcontroller="rc2"} 0 + kube_replicationcontroller_spec_replicas{namespace="ns2",replicationcontroller="rc2"} 0 +`, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(replicationControllerMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/resourcequota.go b/pkg/collectors/resourcequota.go new file mode 100644 index 0000000000..dbdc036531 --- /dev/null +++ b/pkg/collectors/resourcequota.go @@ -0,0 +1,106 @@ +/* 
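Every collector in this PR repeats the same decoration step: a `wrap*Func` helper that prepends the default `namespace` and object-name labels to every metric a generator emits. Stripped of the ReplicationController specifics, the shape is just a higher-order function; below is a minimal stand-in, with a simplified `Metric` in place of the `pkg/metrics` type.

```go
package example

// Metric is a pared-down stand-in for the pkg/metrics type used in this PR.
type Metric struct {
	LabelKeys   []string
	LabelValues []string
	Value       float64
}

// withDefaultLabels decorates a generator so every metric it produces carries
// the given default label keys and values first, like the wrap*Func helpers.
func withDefaultLabels(keys, values []string, gen func(obj interface{}) []*Metric) func(obj interface{}) []*Metric {
	return func(obj interface{}) []*Metric {
		ms := gen(obj)
		for _, m := range ms {
			// Copy the defaults so metrics never share a backing array.
			m.LabelKeys = append(append([]string{}, keys...), m.LabelKeys...)
			m.LabelValues = append(append([]string{}, values...), m.LabelValues...)
		}
		return ms
	}
}
```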
+Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descResourceQuotaLabelsDefaultLabels = []string{"namespace", "resourcequota"} + + resourceQuotaMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_resourcequota_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapResourceQuotaFunc(func(r *v1.ResourceQuota) metrics.Family { + f := metrics.Family{} + + if !r.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_resourcequota_created", + Value: float64(r.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_resourcequota", + Type: metrics.MetricTypeGauge, + Help: "Information about resource quota.", + GenerateFunc: wrapResourceQuotaFunc(func(r *v1.ResourceQuota) metrics.Family { + f := metrics.Family{} + + for res, qty := range r.Status.Hard { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(res), "hard"}, + Value: float64(qty.MilliValue()) / 1000, + }) + } + for res, qty := range r.Status.Used { + f = append(f, &metrics.Metric{ + LabelValues: []string{string(res), "used"}, + Value: float64(qty.MilliValue()) / 1000, + }) + } + + for _, m := range f { + m.Name = "kube_resourcequota" + m.LabelKeys = []string{"resource", "type"} + } + + return f + }), + }, + } +) + +func wrapResourceQuotaFunc(f func(*v1.ResourceQuota) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + resourceQuota := obj.(*v1.ResourceQuota) + + metricFamily := f(resourceQuota) + + for _, m := range metricFamily { + m.LabelKeys = append(descResourceQuotaLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{resourceQuota.Namespace, resourceQuota.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createResourceQuotaListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().ResourceQuotas(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().ResourceQuotas(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/resourcequota_test.go b/pkg/collectors/resourcequota_test.go new file mode 100644 index 0000000000..378f6bb5d9 --- /dev/null +++ b/pkg/collectors/resourcequota_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
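One detail of the `kube_resourcequota` generator worth noting: quantities are flattened to floats as `MilliValue()/1000` rather than `Value()`, which preserves fractional CPU quotas such as `4.3` that `Value()` would round up. A standalone check using only `k8s.io/apimachinery`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("4.3")  // 4300 milli-units
	mem := resource.MustParse("2.1G") // decimal SI: 2.1e9 bytes

	// MilliValue()/1000 keeps sub-unit precision; cpu.Value() would round up to 5.
	fmt.Println(float64(cpu.MilliValue()) / 1000) // 4.3
	fmt.Println(float64(mem.MilliValue()) / 1000) // 2.1e+09
}
```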
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestResourceQuotaCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_resourcequota Information about resource quota. + # TYPE kube_resourcequota gauge + # HELP kube_resourcequota_created Unix creation timestamp + # TYPE kube_resourcequota_created gauge + ` + cases := []generateMetricsTestCase{ + // Verify populating base metrics and that metrics for unset fields are skipped. + { + Obj: &v1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "quotaTest", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "testNS", + }, + Status: v1.ResourceQuotaStatus{}, + }, + Want: ` + kube_resourcequota_created{namespace="testNS",resourcequota="quotaTest"} 1.5e+09 + `, + }, + // Verify resource metrics. + { + Obj: &v1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "quotaTest", + Namespace: "testNS", + }, + Spec: v1.ResourceQuotaSpec{ + Hard: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4.3"), + v1.ResourceMemory: resource.MustParse("2.1G"), + v1.ResourceStorage: resource.MustParse("10G"), + v1.ResourcePods: resource.MustParse("9"), + v1.ResourceServices: resource.MustParse("8"), + v1.ResourceReplicationControllers: resource.MustParse("7"), + v1.ResourceQuotas: resource.MustParse("6"), + v1.ResourceSecrets: resource.MustParse("5"), + v1.ResourceConfigMaps: resource.MustParse("4"), + v1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + v1.ResourceServicesNodePorts: resource.MustParse("2"), + v1.ResourceServicesLoadBalancers: resource.MustParse("1"), + }, + }, + Status: v1.ResourceQuotaStatus{ + Hard: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4.3"), + v1.ResourceMemory: resource.MustParse("2.1G"), + v1.ResourceStorage: resource.MustParse("10G"), + v1.ResourcePods: resource.MustParse("9"), + v1.ResourceServices: resource.MustParse("8"), + v1.ResourceReplicationControllers: resource.MustParse("7"), + v1.ResourceQuotas: resource.MustParse("6"), + v1.ResourceSecrets: resource.MustParse("5"), + v1.ResourceConfigMaps: resource.MustParse("4"), + v1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + v1.ResourceServicesNodePorts: resource.MustParse("2"), + v1.ResourceServicesLoadBalancers: resource.MustParse("1"), + }, + Used: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2.1"), + v1.ResourceMemory: resource.MustParse("500M"), + v1.ResourceStorage: resource.MustParse("9G"), + v1.ResourcePods: resource.MustParse("8"), + v1.ResourceServices: resource.MustParse("7"), + v1.ResourceReplicationControllers: resource.MustParse("6"), + v1.ResourceQuotas: resource.MustParse("5"), + v1.ResourceSecrets: resource.MustParse("4"), + v1.ResourceConfigMaps: resource.MustParse("3"), + v1.ResourcePersistentVolumeClaims: resource.MustParse("2"), + v1.ResourceServicesNodePorts: resource.MustParse("1"), + 
v1.ResourceServicesLoadBalancers: resource.MustParse("0"), + }, + }, + }, + Want: ` + kube_resourcequota{namespace="testNS",resource="configmaps",resourcequota="quotaTest",type="hard"} 4 + kube_resourcequota{namespace="testNS",resource="configmaps",resourcequota="quotaTest",type="used"} 3 + kube_resourcequota{namespace="testNS",resource="cpu",resourcequota="quotaTest",type="hard"} 4.3 + kube_resourcequota{namespace="testNS",resource="cpu",resourcequota="quotaTest",type="used"} 2.1 + kube_resourcequota{namespace="testNS",resource="memory",resourcequota="quotaTest",type="hard"} 2.1e+09 + kube_resourcequota{namespace="testNS",resource="memory",resourcequota="quotaTest",type="used"} 5e+08 + kube_resourcequota{namespace="testNS",resource="persistentvolumeclaims",resourcequota="quotaTest",type="hard"} 3 + kube_resourcequota{namespace="testNS",resource="persistentvolumeclaims",resourcequota="quotaTest",type="used"} 2 + kube_resourcequota{namespace="testNS",resource="pods",resourcequota="quotaTest",type="hard"} 9 + kube_resourcequota{namespace="testNS",resource="pods",resourcequota="quotaTest",type="used"} 8 + kube_resourcequota{namespace="testNS",resource="replicationcontrollers",resourcequota="quotaTest",type="hard"} 7 + kube_resourcequota{namespace="testNS",resource="replicationcontrollers",resourcequota="quotaTest",type="used"} 6 + kube_resourcequota{namespace="testNS",resource="resourcequotas",resourcequota="quotaTest",type="hard"} 6 + kube_resourcequota{namespace="testNS",resource="resourcequotas",resourcequota="quotaTest",type="used"} 5 + kube_resourcequota{namespace="testNS",resource="secrets",resourcequota="quotaTest",type="hard"} 5 + kube_resourcequota{namespace="testNS",resource="secrets",resourcequota="quotaTest",type="used"} 4 + kube_resourcequota{namespace="testNS",resource="services",resourcequota="quotaTest",type="hard"} 8 + kube_resourcequota{namespace="testNS",resource="services",resourcequota="quotaTest",type="used"} 7 + kube_resourcequota{namespace="testNS",resource="services.loadbalancers",resourcequota="quotaTest",type="hard"} 1 + kube_resourcequota{namespace="testNS",resource="services.loadbalancers",resourcequota="quotaTest",type="used"} 0 + kube_resourcequota{namespace="testNS",resource="services.nodeports",resourcequota="quotaTest",type="hard"} 2 + kube_resourcequota{namespace="testNS",resource="services.nodeports",resourcequota="quotaTest",type="used"} 1 + kube_resourcequota{namespace="testNS",resource="storage",resourcequota="quotaTest",type="hard"} 1e+10 + kube_resourcequota{namespace="testNS",resource="storage",resourcequota="quotaTest",type="used"} 9e+09 + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(resourceQuotaMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/secret.go b/pkg/collectors/secret.go new file mode 100644 index 0000000000..037b0e8ecc --- /dev/null +++ b/pkg/collectors/secret.go @@ -0,0 +1,132 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descSecretLabelsName = "kube_secret_labels" + descSecretLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descSecretLabelsDefaultLabels = []string{"namespace", "secret"} + + secretMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_secret_info", + Type: metrics.MetricTypeGauge, + Help: "Information about secret.", + GenerateFunc: wrapSecretFunc(func(s *v1.Secret) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_secret_info", + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_secret_type", + Type: metrics.MetricTypeGauge, + Help: "Type about secret.", + GenerateFunc: wrapSecretFunc(func(s *v1.Secret) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_secret_type", + LabelKeys: []string{"type"}, + LabelValues: []string{string(s.Type)}, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: descSecretLabelsName, + Type: metrics.MetricTypeGauge, + Help: descSecretLabelsHelp, + GenerateFunc: wrapSecretFunc(func(s *v1.Secret) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(s.Labels) + return metrics.Family{&metrics.Metric{ + Name: descSecretLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + + }), + }, + metrics.FamilyGenerator{ + Name: "kube_secret_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapSecretFunc(func(s *v1.Secret) metrics.Family { + f := metrics.Family{} + + if !s.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_secret_created", + Value: float64(s.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_secret_metadata_resource_version", + Type: metrics.MetricTypeGauge, + Help: "Resource version representing a specific version of secret.", + GenerateFunc: wrapSecretFunc(func(s *v1.Secret) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_secret_metadata_resource_version", + LabelKeys: []string{"resource_version"}, + LabelValues: []string{string(s.ObjectMeta.ResourceVersion)}, + Value: 1, + }} + }), + }, + } +) + +func wrapSecretFunc(f func(*v1.Secret) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + secret := obj.(*v1.Secret) + + metricFamily := f(secret) + + for _, m := range metricFamily { + m.LabelKeys = append(descSecretLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{secret.Namespace, secret.Name}, m.LabelValues...) 
+ } + + return metricFamily + } +} + +func createSecretListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().Secrets(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().Secrets(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/secret_test.go b/pkg/collectors/secret_test.go new file mode 100644 index 0000000000..1bec1aee00 --- /dev/null +++ b/pkg/collectors/secret_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSecretCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + + startTime := 1501569018 + metav1StartTime := metav1.Unix(int64(startTime), 0) + + const metadata = ` + # HELP kube_secret_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_secret_labels gauge + # HELP kube_secret_info Information about secret. + # TYPE kube_secret_info gauge + # HELP kube_secret_type Type about secret. + # TYPE kube_secret_type gauge + # HELP kube_secret_created Unix creation timestamp + # TYPE kube_secret_created gauge + # HELP kube_secret_metadata_resource_version Resource version representing a specific version of secret. 
+ # TYPE kube_secret_metadata_resource_version gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + Namespace: "ns1", + ResourceVersion: "000000", + }, + Type: v1.SecretTypeOpaque, + }, + Want: ` + kube_secret_info{namespace="ns1",secret="secret1"} 1 + kube_secret_type{namespace="ns1",secret="secret1",type="Opaque"} 1 + kube_secret_metadata_resource_version{namespace="ns1",resource_version="000000",secret="secret1"} 1 + kube_secret_labels{namespace="ns1",secret="secret1"} 1 +`, + MetricNames: []string{"kube_secret_info", "kube_secret_metadata_resource_version", "kube_secret_created", "kube_secret_labels", "kube_secret_type"}, + }, + { + Obj: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret2", + Namespace: "ns2", + CreationTimestamp: metav1StartTime, + ResourceVersion: "123456", + }, + Type: v1.SecretTypeServiceAccountToken, + }, + Want: ` + kube_secret_info{namespace="ns2",secret="secret2"} 1 + kube_secret_type{namespace="ns2",secret="secret2",type="kubernetes.io/service-account-token"} 1 + kube_secret_created{namespace="ns2",secret="secret2"} 1.501569018e+09 + kube_secret_metadata_resource_version{namespace="ns2",resource_version="123456",secret="secret2"} 1 + kube_secret_labels{namespace="ns2",secret="secret2"} 1 + `, + MetricNames: []string{"kube_secret_info", "kube_secret_metadata_resource_version", "kube_secret_created", "kube_secret_labels", "kube_secret_type"}, + }, + { + Obj: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret3", + Namespace: "ns3", + CreationTimestamp: metav1StartTime, + Labels: map[string]string{"test-3": "test-3"}, + ResourceVersion: "abcdef", + }, + Type: v1.SecretTypeDockercfg, + }, + Want: ` + kube_secret_info{namespace="ns3",secret="secret3"} 1 + kube_secret_type{namespace="ns3",secret="secret3",type="kubernetes.io/dockercfg"} 1 + kube_secret_created{namespace="ns3",secret="secret3"} 1.501569018e+09 + kube_secret_metadata_resource_version{namespace="ns3",resource_version="abcdef",secret="secret3"} 1 + kube_secret_labels{label_test_3="test-3",namespace="ns3",secret="secret3"} 1 +`, + MetricNames: []string{"kube_secret_info", "kube_secret_metadata_resource_version", "kube_secret_created", "kube_secret_labels", "kube_secret_type"}, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(secretMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + + } +} diff --git a/pkg/collectors/service.go b/pkg/collectors/service.go new file mode 100644 index 0000000000..4fc54a3779 --- /dev/null +++ b/pkg/collectors/service.go @@ -0,0 +1,166 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
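The third secret case is the one that actually exercises label conversion: the Kubernetes key `test-3` surfaces as `label_test_3`, prefixed with `label_` and with characters that are illegal in Prometheus label names replaced by underscores. The helper doing this (`kubeLabelsToPrometheusLabels`) is defined elsewhere in this PR; the sketch below is a re-derivation from the expected output above, so treat the exact regex as an assumption.

```go
package example

import "regexp"

var invalidLabelChar = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// toPrometheusLabels approximates the conversion the test exercises: each
// Kubernetes label key is prefixed with "label_" and sanitized, while the
// values pass through unchanged ("test-3" stays "test-3").
func toPrometheusLabels(labels map[string]string) (keys, values []string) {
	for k, v := range labels {
		keys = append(keys, "label_"+invalidLabelChar.ReplaceAllString(k, "_"))
		values = append(values, v)
	}
	return keys, values
}
```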
+*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descServiceLabelsName = "kube_service_labels" + descServiceLabelsHelp = "Kubernetes labels converted to Prometheus labels." + descServiceLabelsDefaultLabels = []string{"namespace", "service"} + + serviceMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_service_info", + Type: metrics.MetricTypeGauge, + Help: "Information about service.", + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + m := metrics.Metric{ + Name: "kube_service_info", + LabelKeys: []string{"cluster_ip", "external_name", "load_balancer_ip"}, + LabelValues: []string{s.Spec.ClusterIP, s.Spec.ExternalName, s.Spec.LoadBalancerIP}, + Value: 1, + } + return metrics.Family{&m} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_service_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + if !s.CreationTimestamp.IsZero() { + m := metrics.Metric{ + Name: "kube_service_created", + LabelKeys: nil, + LabelValues: nil, + Value: float64(s.CreationTimestamp.Unix()), + } + return metrics.Family{&m} + } + return nil + }), + }, + metrics.FamilyGenerator{ + Name: "kube_service_spec_type", + Type: metrics.MetricTypeGauge, + Help: "Type about service.", + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + m := metrics.Metric{ + Name: "kube_service_spec_type", + LabelKeys: []string{"type"}, + LabelValues: []string{string(s.Spec.Type)}, + Value: 1, + } + return metrics.Family{&m} + }), + }, + metrics.FamilyGenerator{ + Name: descServiceLabelsName, + Type: metrics.MetricTypeGauge, + Help: descServiceLabelsHelp, + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(s.Labels) + m := metrics.Metric{ + Name: descServiceLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + } + return metrics.Family{&m} + }), + }, + { + Name: "kube_service_spec_external_ip", + Type: metrics.MetricTypeGauge, + Help: "Service external ips. 
One series for each ip", + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + family := metrics.Family{} + + if len(s.Spec.ExternalIPs) > 0 { + for _, externalIP := range s.Spec.ExternalIPs { + family = append(family, &metrics.Metric{ + Name: "kube_service_spec_external_ip", + LabelKeys: []string{"external_ip"}, + LabelValues: []string{externalIP}, + Value: 1, + }) + } + } + + return family + }), + }, + { + Name: "kube_service_status_load_balancer_ingress", + Type: metrics.MetricTypeGauge, + Help: "Service load balancer ingress status", + GenerateFunc: wrapSvcFunc(func(s *v1.Service) metrics.Family { + family := metrics.Family{} + + if len(s.Status.LoadBalancer.Ingress) > 0 { + for _, ingress := range s.Status.LoadBalancer.Ingress { + family = append(family, &metrics.Metric{ + Name: "kube_service_status_load_balancer_ingress", + LabelKeys: []string{"ip", "hostname"}, + LabelValues: []string{ingress.IP, ingress.Hostname}, + Value: 1, + }) + + } + } + + return family + }), + }, + } +) + +func wrapSvcFunc(f func(*v1.Service) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + svc := obj.(*v1.Service) + + metricFamily := f(svc) + + for _, m := range metricFamily { + m.LabelKeys = append(descServiceLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{svc.Namespace, svc.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createServiceListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.CoreV1().Services(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.CoreV1().Services(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/service_test.go b/pkg/collectors/service_test.go new file mode 100644 index 0000000000..6b80c2c3a2 --- /dev/null +++ b/pkg/collectors/service_test.go @@ -0,0 +1,207 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestServiceCollector(t *testing.T) { + // Fixed metadata on type and help text. We prepend this to every expected + // output so we only have to modify a single place when doing adjustments. + const metadata = ` + # HELP kube_service_info Information about service. + # TYPE kube_service_info gauge + # HELP kube_service_created Unix creation timestamp + # TYPE kube_service_created gauge + # HELP kube_service_labels Kubernetes labels converted to Prometheus labels. + # TYPE kube_service_labels gauge + # HELP kube_service_spec_type Type about service. + # TYPE kube_service_spec_type gauge + # HELP kube_service_spec_external_ip Service external ips. 
One series for each ip + # TYPE kube_service_spec_external_ip gauge + # HELP kube_service_status_load_balancer_ingress Service load balancer ingress status + # TYPE kube_service_status_load_balancer_ingress gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "example1", + }, + }, + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.4", + Type: v1.ServiceTypeClusterIP, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service1"} 1.5e+09 + kube_service_info{cluster_ip="1.2.3.4",external_name="",load_balancer_ip="",namespace="default",service="test-service1"} 1 + kube_service_labels{label_app="example1",namespace="default",service="test-service1"} 1 + kube_service_spec_type{namespace="default",service="test-service1",type="ClusterIP"} 1 +`, + MetricNames: []string{ + "kube_service_created", + "kube_service_info", + "kube_service_labels", + "kube_service_spec_type", + }, + }, + { + + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service2", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "example2", + }, + }, + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.5", + Type: v1.ServiceTypeNodePort, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service2"} 1.5e+09 + kube_service_info{cluster_ip="1.2.3.5",external_name="",load_balancer_ip="",namespace="default",service="test-service2"} 1 + kube_service_labels{label_app="example2",namespace="default",service="test-service2"} 1 + kube_service_spec_type{namespace="default",service="test-service2",type="NodePort"} 1 +`, + }, + { + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service3", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "example3", + }, + }, + Spec: v1.ServiceSpec{ + ClusterIP: "1.2.3.6", + LoadBalancerIP: "1.2.3.7", + Type: v1.ServiceTypeLoadBalancer, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service3"} 1.5e+09 + kube_service_info{cluster_ip="1.2.3.6",external_name="",load_balancer_ip="1.2.3.7",namespace="default",service="test-service3"} 1 + kube_service_labels{label_app="example3",namespace="default",service="test-service3"} 1 + kube_service_spec_type{namespace="default",service="test-service3",type="LoadBalancer"} 1 +`, + }, + { + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service4", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "example4", + }, + }, + Spec: v1.ServiceSpec{ + ExternalName: "www.example.com", + Type: v1.ServiceTypeExternalName, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service4"} 1.5e+09 + kube_service_info{cluster_ip="",external_name="www.example.com",load_balancer_ip="",namespace="default",service="test-service4"} 1 + kube_service_labels{label_app="example4",namespace="default",service="test-service4"} 1 + kube_service_spec_type{namespace="default",service="test-service4",type="ExternalName"} 1 + `, + }, + { + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service5", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": 
"example5", + }, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeLoadBalancer, + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + v1.LoadBalancerIngress{ + IP: "1.2.3.8", + Hostname: "www.example.com", + }, + }, + }, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service5"} 1.5e+09 + kube_service_info{cluster_ip="",external_name="",load_balancer_ip="",namespace="default",service="test-service5"} 1 + kube_service_labels{label_app="example5",namespace="default",service="test-service5"} 1 + kube_service_spec_type{namespace="default",service="test-service5",type="LoadBalancer"} 1 + kube_service_status_load_balancer_ingress{hostname="www.example.com",ip="1.2.3.8",namespace="default",service="test-service5"} 1 + `, + }, + { + Obj: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service6", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "default", + Labels: map[string]string{ + "app": "example6", + }, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ExternalIPs: []string{ + "1.2.3.9", + "1.2.3.10", + }, + }, + }, + Want: ` + kube_service_created{namespace="default",service="test-service6"} 1.5e+09 + kube_service_info{cluster_ip="",external_name="",load_balancer_ip="",namespace="default",service="test-service6"} 1 + kube_service_labels{label_app="example6",namespace="default",service="test-service6"} 1 + kube_service_spec_type{namespace="default",service="test-service6",type="ClusterIP"} 1 + kube_service_spec_external_ip{external_ip="1.2.3.9",namespace="default",service="test-service6"} 1 + kube_service_spec_external_ip{external_ip="1.2.3.10",namespace="default",service="test-service6"} 1 + `, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(serviceMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/statefulset.go b/pkg/collectors/statefulset.go new file mode 100644 index 0000000000..f5840851e8 --- /dev/null +++ b/pkg/collectors/statefulset.go @@ -0,0 +1,209 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "k8s.io/kube-state-metrics/pkg/metrics" + + "k8s.io/api/apps/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +var ( + descStatefulSetLabelsName = "kube_statefulset_labels" + descStatefulSetLabelsHelp = "Kubernetes labels converted to Prometheus labels." 
+ descStatefulSetLabelsDefaultLabels = []string{"namespace", "statefulset"} + + statefulSetMetricFamilies = []metrics.FamilyGenerator{ + metrics.FamilyGenerator{ + Name: "kube_statefulset_created", + Type: metrics.MetricTypeGauge, + Help: "Unix creation timestamp", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + f := metrics.Family{} + + if !s.CreationTimestamp.IsZero() { + f = append(f, &metrics.Metric{ + Name: "kube_statefulset_created", + Value: float64(s.CreationTimestamp.Unix()), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_replicas", + Type: metrics.MetricTypeGauge, + Help: "The number of replicas per StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_replicas", + Value: float64(s.Status.Replicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_replicas_current", + Type: metrics.MetricTypeGauge, + Help: "The number of current replicas per StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_replicas_current", + Value: float64(s.Status.CurrentReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_replicas_ready", + Type: metrics.MetricTypeGauge, + Help: "The number of ready replicas per StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_replicas_ready", + Value: float64(s.Status.ReadyReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_replicas_updated", + Type: metrics.MetricTypeGauge, + Help: "The number of updated replicas per StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_replicas_updated", + Value: float64(s.Status.UpdatedReplicas), + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_observed_generation", + Type: metrics.MetricTypeGauge, + Help: "The generation observed by the StatefulSet controller.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + f := metrics.Family{} + + if s.Status.ObservedGeneration != nil { + f = append(f, &metrics.Metric{ + Name: "kube_statefulset_status_observed_generation", + Value: float64(*s.Status.ObservedGeneration), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_replicas", + Type: metrics.MetricTypeGauge, + Help: "Number of desired pods for a StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + f := metrics.Family{} + + if s.Spec.Replicas != nil { + f = append(f, &metrics.Metric{ + Name: "kube_statefulset_replicas", + Value: float64(*s.Spec.Replicas), + }) + } + + return f + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_metadata_generation", + Type: metrics.MetricTypeGauge, + Help: "Sequence number representing a specific generation of the desired state for the StatefulSet.", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_metadata_generation", + Value: float64(s.ObjectMeta.Generation), + }} + }), + }, + metrics.FamilyGenerator{ + Name: 
descStatefulSetLabelsName, + Type: metrics.MetricTypeGauge, + Help: descStatefulSetLabelsHelp, + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + labelKeys, labelValues := kubeLabelsToPrometheusLabels(s.Labels) + return metrics.Family{&metrics.Metric{ + Name: descStatefulSetLabelsName, + LabelKeys: labelKeys, + LabelValues: labelValues, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_current_revision", + Type: metrics.MetricTypeGauge, + Help: "Indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_current_revision", + LabelKeys: []string{"revision"}, + LabelValues: []string{s.Status.CurrentRevision}, + Value: 1, + }} + }), + }, + metrics.FamilyGenerator{ + Name: "kube_statefulset_status_update_revision", + Type: metrics.MetricTypeGauge, + Help: "Indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + GenerateFunc: wrapStatefulSetFunc(func(s *v1beta1.StatefulSet) metrics.Family { + return metrics.Family{&metrics.Metric{ + Name: "kube_statefulset_status_update_revision", + LabelKeys: []string{"revision"}, + LabelValues: []string{s.Status.UpdateRevision}, + Value: 1, + }} + }), + }, + } +) + +func wrapStatefulSetFunc(f func(*v1beta1.StatefulSet) metrics.Family) func(interface{}) metrics.Family { + return func(obj interface{}) metrics.Family { + statefulSet := obj.(*v1beta1.StatefulSet) + + metricFamily := f(statefulSet) + + for _, m := range metricFamily { + m.LabelKeys = append(descStatefulSetLabelsDefaultLabels, m.LabelKeys...) + m.LabelValues = append([]string{statefulSet.Namespace, statefulSet.Name}, m.LabelValues...) + } + + return metricFamily + } +} + +func createStatefulSetListWatch(kubeClient clientset.Interface, ns string) cache.ListWatch { + return cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return kubeClient.AppsV1beta1().StatefulSets(ns).List(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return kubeClient.AppsV1beta1().StatefulSets(ns).Watch(opts) + }, + } +} diff --git a/pkg/collectors/statefulset_test.go b/pkg/collectors/statefulset_test.go new file mode 100644 index 0000000000..4387d180c2 --- /dev/null +++ b/pkg/collectors/statefulset_test.go @@ -0,0 +1,213 @@ +/* +Copyright 2017 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
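Among these collectors, StatefulSet is the one whose `ObservedGeneration` status field is a pointer, so its generator guards the dereference and emits no sample when the field is unset, the same convention the `_created` metrics follow for zero timestamps. In isolation the pattern looks like this (again using the PR's `pkg/metrics` types):

```go
package example

import "k8s.io/kube-state-metrics/pkg/metrics"

// observedGenerationFamily emits a sample only when the optional status
// field is set; an unset pointer yields an empty family, i.e. no series.
func observedGenerationFamily(observed *int64) metrics.Family {
	f := metrics.Family{}
	if observed != nil {
		f = append(f, &metrics.Metric{
			Name:  "kube_statefulset_status_observed_generation",
			Value: float64(*observed),
		})
	}
	return f
}
```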
+*/
+
+package collectors
+
+import (
+	"testing"
+	"time"
+
+	"k8s.io/api/apps/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var (
+	statefulSet1Replicas int32 = 3
+	statefulSet2Replicas int32 = 6
+	statefulSet3Replicas int32 = 9
+
+	statefulSet1ObservedGeneration int64 = 1
+	statefulSet2ObservedGeneration int64 = 2
+)
+
+func TestStatefulSetCollector(t *testing.T) {
+	// Fixed metadata on type and help text. We prepend this to every expected
+	// output so we only have to modify a single place when doing adjustments.
+	const metadata = `
+		# HELP kube_statefulset_created Unix creation timestamp
+		# TYPE kube_statefulset_created gauge
+		# HELP kube_statefulset_status_current_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).
+		# TYPE kube_statefulset_status_current_revision gauge
+		# HELP kube_statefulset_status_replicas The number of replicas per StatefulSet.
+		# TYPE kube_statefulset_status_replicas gauge
+		# HELP kube_statefulset_status_replicas_current The number of current replicas per StatefulSet.
+		# TYPE kube_statefulset_status_replicas_current gauge
+		# HELP kube_statefulset_status_replicas_ready The number of ready replicas per StatefulSet.
+		# TYPE kube_statefulset_status_replicas_ready gauge
+		# HELP kube_statefulset_status_replicas_updated The number of updated replicas per StatefulSet.
+		# TYPE kube_statefulset_status_replicas_updated gauge
+		# HELP kube_statefulset_status_observed_generation The generation observed by the StatefulSet controller.
+		# TYPE kube_statefulset_status_observed_generation gauge
+		# HELP kube_statefulset_status_update_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)
+		# TYPE kube_statefulset_status_update_revision gauge
+		# HELP kube_statefulset_replicas Number of desired pods for a StatefulSet.
+		# TYPE kube_statefulset_replicas gauge
+		# HELP kube_statefulset_metadata_generation Sequence number representing a specific generation of the desired state for the StatefulSet.
+		# TYPE kube_statefulset_metadata_generation gauge
+		# HELP kube_statefulset_labels Kubernetes labels converted to Prometheus labels.
+ # TYPE kube_statefulset_labels gauge + ` + cases := []generateMetricsTestCase{ + { + Obj: &v1beta1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "statefulset1", + CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)}, + Namespace: "ns1", + Labels: map[string]string{ + "app": "example1", + }, + Generation: 3, + }, + Spec: v1beta1.StatefulSetSpec{ + Replicas: &statefulSet1Replicas, + ServiceName: "statefulset1service", + }, + Status: v1beta1.StatefulSetStatus{ + ObservedGeneration: &statefulSet1ObservedGeneration, + Replicas: 2, + UpdateRevision: "ur1", + CurrentRevision: "cr1", + }, + }, + Want: ` + kube_statefulset_status_update_revision{namespace="ns1",revision="ur1",statefulset="statefulset1"} 1 + kube_statefulset_created{namespace="ns1",statefulset="statefulset1"} 1.5e+09 + kube_statefulset_status_current_revision{namespace="ns1",revision="cr1",statefulset="statefulset1"} 1 + kube_statefulset_status_replicas{namespace="ns1",statefulset="statefulset1"} 2 + kube_statefulset_status_replicas_current{namespace="ns1",statefulset="statefulset1"} 0 + kube_statefulset_status_replicas_ready{namespace="ns1",statefulset="statefulset1"} 0 + kube_statefulset_status_replicas_updated{namespace="ns1",statefulset="statefulset1"} 0 + kube_statefulset_status_observed_generation{namespace="ns1",statefulset="statefulset1"} 1 + kube_statefulset_replicas{namespace="ns1",statefulset="statefulset1"} 3 + kube_statefulset_metadata_generation{namespace="ns1",statefulset="statefulset1"} 3 + kube_statefulset_labels{label_app="example1",namespace="ns1",statefulset="statefulset1"} 1 +`, + MetricNames: []string{ + "kube_statefulset_created", + "kube_statefulset_labels", + "kube_statefulset_metadata_generation", + "kube_statefulset_replicas", + "kube_statefulset_status_observed_generation", + "kube_statefulset_status_replicas", + "kube_statefulset_status_replicas_current", + "kube_statefulset_status_replicas_ready", + "kube_statefulset_status_replicas_updated", + "kube_statefulset_status_update_revision", + "kube_statefulset_status_current_revision", + }, + }, + { + Obj: &v1beta1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "statefulset2", + Namespace: "ns2", + Labels: map[string]string{ + "app": "example2", + }, + Generation: 21, + }, + Spec: v1beta1.StatefulSetSpec{ + Replicas: &statefulSet2Replicas, + ServiceName: "statefulset2service", + }, + Status: v1beta1.StatefulSetStatus{ + CurrentReplicas: 2, + ObservedGeneration: &statefulSet2ObservedGeneration, + ReadyReplicas: 5, + Replicas: 5, + UpdatedReplicas: 3, + UpdateRevision: "ur2", + CurrentRevision: "cr2", + }, + }, + Want: ` + kube_statefulset_status_update_revision{namespace="ns2",revision="ur2",statefulset="statefulset2"} 1 + kube_statefulset_status_replicas{namespace="ns2",statefulset="statefulset2"} 5 + kube_statefulset_status_replicas_current{namespace="ns2",statefulset="statefulset2"} 2 + kube_statefulset_status_replicas_ready{namespace="ns2",statefulset="statefulset2"} 5 + kube_statefulset_status_replicas_updated{namespace="ns2",statefulset="statefulset2"} 3 + kube_statefulset_status_observed_generation{namespace="ns2",statefulset="statefulset2"} 2 + kube_statefulset_replicas{namespace="ns2",statefulset="statefulset2"} 6 + kube_statefulset_metadata_generation{namespace="ns2",statefulset="statefulset2"} 21 + kube_statefulset_labels{label_app="example2",namespace="ns2",statefulset="statefulset2"} 1 + kube_statefulset_status_current_revision{namespace="ns2",revision="cr2",statefulset="statefulset2"} 1 +`, + MetricNames: []string{ + 
"kube_statefulset_labels", + "kube_statefulset_metadata_generation", + "kube_statefulset_replicas", + "kube_statefulset_status_observed_generation", + "kube_statefulset_status_replicas", + "kube_statefulset_status_replicas_current", + "kube_statefulset_status_replicas_ready", + "kube_statefulset_status_replicas_updated", + "kube_statefulset_status_update_revision", + "kube_statefulset_status_current_revision", + }, + }, + { + Obj: &v1beta1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "statefulset3", + Namespace: "ns3", + Labels: map[string]string{ + "app": "example3", + }, + Generation: 36, + }, + Spec: v1beta1.StatefulSetSpec{ + Replicas: &statefulSet3Replicas, + ServiceName: "statefulset2service", + }, + Status: v1beta1.StatefulSetStatus{ + ObservedGeneration: nil, + Replicas: 7, + UpdateRevision: "ur3", + CurrentRevision: "cr3", + }, + }, + Want: ` + kube_statefulset_status_update_revision{namespace="ns3",revision="ur3",statefulset="statefulset3"} 1 + kube_statefulset_status_replicas{namespace="ns3",statefulset="statefulset3"} 7 + kube_statefulset_status_replicas_current{namespace="ns3",statefulset="statefulset3"} 0 + kube_statefulset_status_replicas_ready{namespace="ns3",statefulset="statefulset3"} 0 + kube_statefulset_status_replicas_updated{namespace="ns3",statefulset="statefulset3"} 0 + kube_statefulset_replicas{namespace="ns3",statefulset="statefulset3"} 9 + kube_statefulset_metadata_generation{namespace="ns3",statefulset="statefulset3"} 36 + kube_statefulset_labels{label_app="example3",namespace="ns3",statefulset="statefulset3"} 1 + kube_statefulset_status_current_revision{namespace="ns3",revision="cr3",statefulset="statefulset3"} 1 + `, + MetricNames: []string{ + "kube_statefulset_labels", + "kube_statefulset_metadata_generation", + "kube_statefulset_replicas", + "kube_statefulset_status_replicas", + "kube_statefulset_status_replicas_current", + "kube_statefulset_status_replicas_ready", + "kube_statefulset_status_replicas_updated", + "kube_statefulset_status_update_revision", + "kube_statefulset_status_current_revision", + }, + }, + } + for i, c := range cases { + c.Func = composeMetricGenFuncs(statefulSetMetricFamilies) + if err := c.run(); err != nil { + t.Errorf("unexpected collecting result in %vth run:\n%s", i, err) + } + } +} diff --git a/pkg/collectors/testutils.go b/pkg/collectors/testutils.go new file mode 100644 index 0000000000..92d0746ba0 --- /dev/null +++ b/pkg/collectors/testutils.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +// TODO: Does this file need to be renamed to not be compiled in production? 
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"k8s.io/kube-state-metrics/pkg/metrics_store"
+)
+
+type generateMetricsTestCase struct {
+	Obj         interface{}
+	MetricNames []string
+	Want        string
+	Func        func(interface{}) []metricsstore.FamilyStringer
+}
+
+func (testCase *generateMetricsTestCase) run() error {
+	metricFamilies := testCase.Func(testCase.Obj)
+	metricFamilyStrings := []string{}
+	for _, f := range metricFamilies {
+		metricFamilyStrings = append(metricFamilyStrings, f.String())
+	}
+
+	metrics := strings.Split(strings.Join(metricFamilyStrings, ""), "\n")
+
+	metrics = filterMetrics(metrics, testCase.MetricNames)
+
+	out := strings.Join(metrics, "\n")
+
+	if err := compareOutput(testCase.Want, out); err != nil {
+		return fmt.Errorf("expected wanted output to equal output: %v", err.Error())
+	}
+
+	return nil
+}
+
+func compareOutput(a, b string) error {
+	entities := []string{a, b}
+
+	// Align a and b
+	for i := 0; i < len(entities); i++ {
+		for _, f := range []func(string) string{removeUnusedWhitespace, sortLabels, sortByLine} {
+			entities[i] = f(entities[i])
+		}
+	}
+
+	if entities[0] != entities[1] {
+		return fmt.Errorf("expected a to equal b but got:\n%v\nand:\n%v", entities[0], entities[1])
+	}
+
+	return nil
+}
+
+// sortLabels sorts the order of labels in each line of the given metrics. The
+// Prometheus exposition format does not force ordering of labels. Hence a test
+// should not fail due to different metric orders.
+func sortLabels(s string) string {
+	sorted := []string{}
+
+	for _, line := range strings.Split(s, "\n") {
+		split := strings.Split(line, "{")
+		if len(split) != 2 {
+			panic(fmt.Sprintf("failed to sort labels in \"%v\"", line))
+		}
+		name := split[0]
+
+		split = strings.Split(split[1], "}")
+		value := split[1]
+
+		labels := strings.Split(split[0], ",")
+		sort.Strings(labels)
+
+		sorted = append(sorted, fmt.Sprintf("%v{%v}%v", name, strings.Join(labels, ","), value))
+	}
+
+	return strings.Join(sorted, "\n")
+}
+
+func sortByLine(s string) string {
+	split := strings.Split(s, "\n")
+	sort.Strings(split)
+	return strings.Join(split, "\n")
+}
+
+func filterMetrics(ms []string, names []string) []string {
+	// In case the test case is based on all returned metrics, MetricNames does
+	// not need to be defined.
+	if names == nil {
+		return ms
+	}
+	filtered := []string{}
+
+	regexps := []*regexp.Regexp{}
+	for _, n := range names {
+		regexps = append(regexps, regexp.MustCompile(fmt.Sprintf("^%v", n)))
+	}
+
+	for _, m := range ms {
+		drop := true
+		for _, r := range regexps {
+			if r.MatchString(m) {
+				drop = false
+				break
+			}
+		}
+		if !drop {
+			filtered = append(filtered, m)
+		}
+	}
+	return filtered
+}
+
+func removeUnusedWhitespace(s string) string {
+	var (
+		trimmedLine  string
+		trimmedLines []string
+		lines        = strings.Split(s, "\n")
+	)
+
+	for _, l := range lines {
+		trimmedLine = strings.TrimSpace(l)
+
+		if len(trimmedLine) > 0 {
+			trimmedLines = append(trimmedLines, trimmedLine)
+		}
+	}
+
+	return strings.Join(trimmedLines, "\n")
+}
diff --git a/pkg/collectors/testutils_test.go b/pkg/collectors/testutils_test.go
new file mode 100644
index 0000000000..f44c4d9897
--- /dev/null
+++ b/pkg/collectors/testutils_test.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2018 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "testing" +) + +func TestSortLabels(t *testing.T) { + in := `kube_pod_container_info{container_id="docker://cd456",image="k8s.gcr.io/hyperkube2",container="container2",image_id="docker://sha256:bbb",namespace="ns2",pod="pod2"} 1 +kube_pod_container_info{namespace="ns2",container="container3",container_id="docker://ef789",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",pod="pod2"} 1` + + want := `kube_pod_container_info{container="container2",container_id="docker://cd456",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",namespace="ns2",pod="pod2"} 1 +kube_pod_container_info{container="container3",container_id="docker://ef789",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",namespace="ns2",pod="pod2"} 1` + + out := sortLabels(in) + + if want != out { + t.Fatalf("expected:\n%v\nbut got:\n%v", want, out) + } +} diff --git a/pkg/collectors/utils.go b/pkg/collectors/utils.go new file mode 100644 index 0000000000..c15adc529d --- /dev/null +++ b/pkg/collectors/utils.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectors + +import ( + "regexp" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/api/core/v1" + + "k8s.io/kube-state-metrics/pkg/metrics" +) + +var ( + resyncPeriod = 5 * time.Minute + + ScrapeErrorTotalMetric = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "ksm_scrape_error_total", + Help: "Total scrape errors encountered when scraping a resource", + }, + []string{"resource"}, + ) + + ResourcesPerScrapeMetric = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "ksm_resources_per_scrape", + Help: "Number of resources returned per scrape", + }, + []string{"resource"}, + ) + + invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) +) + +func boolFloat64(b bool) float64 { + if b { + return 1 + } + return 0 +} + +// addConditionMetrics generates one metric for each possible node condition +// status. For this function to work properly, the last label in the metric +// description must be the condition. 
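+// For example, a condition status of v1.ConditionTrue yields three series
+// with the label values "true", "false" and "unknown", valued 1, 0 and 0.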
+func addConditionMetrics(cs v1.ConditionStatus) []*metrics.Metric { + return []*metrics.Metric{ + &metrics.Metric{ + LabelValues: []string{"true"}, + Value: boolFloat64(cs == v1.ConditionTrue), + }, + &metrics.Metric{ + LabelValues: []string{"false"}, + Value: boolFloat64(cs == v1.ConditionFalse), + }, + &metrics.Metric{ + LabelValues: []string{"unknown"}, + Value: boolFloat64(cs == v1.ConditionUnknown), + }, + } +} + +func kubeLabelsToPrometheusLabels(labels map[string]string) ([]string, []string) { + labelKeys := make([]string, len(labels)) + labelValues := make([]string, len(labels)) + i := 0 + for k, v := range labels { + labelKeys[i] = "label_" + sanitizeLabelName(k) + labelValues[i] = v + i++ + } + return labelKeys, labelValues +} + +func kubeAnnotationsToPrometheusAnnotations(annotations map[string]string) ([]string, []string) { + annotationKeys := make([]string, len(annotations)) + annotationValues := make([]string, len(annotations)) + i := 0 + for k, v := range annotations { + annotationKeys[i] = "annotation_" + sanitizeLabelName(k) + annotationValues[i] = v + i++ + } + return annotationKeys, annotationValues +} + +func sanitizeLabelName(s string) string { + return invalidLabelCharRE.ReplaceAllString(s, "_") +} diff --git a/pkg/constant/resource_unit.go b/pkg/constant/resource_unit.go new file mode 100644 index 0000000000..81a9930beb --- /dev/null +++ b/pkg/constant/resource_unit.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constant + +type ResourceUnit string + +const ( + UnitByte ResourceUnit = "byte" + UnitCore ResourceUnit = "core" + UnitInteger ResourceUnit = "integer" +) diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 0000000000..50e163d330 --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "math" + "strconv" + "strings" + "sync" +) + +const ( + initialNumBufSize = 24 +) + +var ( + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// FamilyGenerator provides everything needed to generate a metric family with a +// Kubernetes object. +type FamilyGenerator struct { + Name string + Help string + Type MetricType + GenerateFunc func(obj interface{}) Family +} + +// Family represents a set of metrics with the same name and help text. 
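+// It is rendered into the Prometheus text exposition format by String(),
+// which emits one "name{labels} value" line per contained metric.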
+type Family []*Metric + +// String returns the given Family in its string representation. +func (f Family) String() string { + b := strings.Builder{} + for _, m := range f { + m.Write(&b) + } + + return b.String() +} + +// MetricType represents the type of a metric e.g. a counter. See +// https://prometheus.io/docs/concepts/metric_types/. +type MetricType string + +// MetricTypeGauge defines a Prometheus gauge. +var MetricTypeGauge MetricType = "gauge" + +// MetricTypeCounter defines a Prometheus counter. +var MetricTypeCounter MetricType = "counter" + +// Metric represents a single time series. +type Metric struct { + Name string + LabelKeys []string + LabelValues []string + Value float64 +} + +func (m *Metric) Write(s *strings.Builder) { + if len(m.LabelKeys) != len(m.LabelValues) { + panic("expected labelKeys to be of same length as labelValues") + } + + s.WriteString(m.Name) + labelsToString(s, m.LabelKeys, m.LabelValues) + s.WriteByte(' ') + writeFloat(s, m.Value) + s.WriteByte('\n') +} + +func labelsToString(m *strings.Builder, keys, values []string) { + if len(keys) > 0 { + var separator byte = '{' + + for i := 0; i < len(keys); i++ { + m.WriteByte(separator) + m.WriteString(keys[i]) + m.WriteString("=\"") + escapeString(m, values[i]) + m.WriteByte('"') + separator = ',' + } + + m.WriteByte('}') + } +} + +var ( + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and '"' by +// '\"'. +// Taken from github.com/prometheus/common/expfmt/text_create.go. +func escapeString(m *strings.Builder, v string) { + escapeWithDoubleQuote.WriteString(m, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +// Taken from github.com/prometheus/common/expfmt/text_create.go. 
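+// The pooled buffer (numBufPool) lets the strconv.AppendFloat fallback run
+// without a fresh allocation per call.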
+func writeFloat(w *strings.Builder, f float64) {
+	switch {
+	case f == 1:
+		w.WriteByte('1')
+	case f == 0:
+		w.WriteByte('0')
+	case f == -1:
+		w.WriteString("-1")
+	case math.IsNaN(f):
+		w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		w.Write(*bp)
+		numBufPool.Put(bp)
+	}
+}
diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go
new file mode 100644
index 0000000000..5c135f9c5c
--- /dev/null
+++ b/pkg/metrics/metrics_test.go
@@ -0,0 +1,72 @@
+package metrics
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestFamilyString(t *testing.T) {
+	m := Metric{
+		Name:        "kube_pod_info",
+		LabelKeys:   []string{"namespace"},
+		LabelValues: []string{"default"},
+		Value:       1,
+	}
+
+	f := Family{&m}
+
+	expected := "kube_pod_info{namespace=\"default\"} 1"
+	got := strings.TrimSpace(f.String())
+
+	if got != expected {
+		t.Fatalf("expected %v but got %v", expected, got)
+	}
+}
+
+func BenchmarkMetricWrite(b *testing.B) {
+	tests := []struct {
+		testName       string
+		metric         Metric
+		expectedLength int
+	}{
+		{
+			testName: "value-1",
+			metric: Metric{
+				Name:        "kube_pod_container_info",
+				LabelKeys:   []string{"container", "container_id", "image", "image_id", "namespace", "pod"},
+				LabelValues: []string{"container2", "docker://cd456", "k8s.gcr.io/hyperkube2", "docker://sha256:bbb", "ns2", "pod2"},
+				Value:       float64(1),
+			},
+			expectedLength: 168,
+		},
+		{
+			testName: "value-35.7",
+			metric: Metric{
+				Name:        "kube_pod_container_info",
+				LabelKeys:   []string{"container", "container_id", "image", "image_id", "namespace", "pod"},
+				LabelValues: []string{"container2", "docker://cd456", "k8s.gcr.io/hyperkube2", "docker://sha256:bbb", "ns2", "pod2"},
+				Value:       float64(35.7),
+			},
+			expectedLength: 171,
+		},
+	}
+
+	for _, test := range tests {
+		b.Run(test.testName, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				builder := strings.Builder{}
+
+				test.metric.Write(&builder)
+
+				s := builder.String()
+
+				// Ensuring that the string is actually built, not optimized
+				// away by compilation.
+				got := len(s)
+				if test.expectedLength != got {
+					b.Fatalf("expected string of length %v but got %v", test.expectedLength, got)
+				}
+			}
+		})
+	}
+}
diff --git a/pkg/metrics_store/metrics_store.go b/pkg/metrics_store/metrics_store.go
new file mode 100644
index 0000000000..0a616f8bfb
--- /dev/null
+++ b/pkg/metrics_store/metrics_store.go
@@ -0,0 +1,142 @@
+package metricsstore
+
+import (
+	"io"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// FamilyStringer represents a metric family that can be converted to its string
+// representation.
+type FamilyStringer interface {
+	String() string
+}
+
+// MetricsStore implements the k8s.io/kubernetes/client-go/tools/cache.Store
+// interface. Instead of storing entire Kubernetes objects, it stores metrics
+// generated based on those objects.
+type MetricsStore struct {
+	// Protects metrics
+	mutex sync.RWMutex
+	// metrics is a map indexed by Kubernetes object id, containing a slice of
+	// metric families, containing a slice of metrics. We need to keep metrics
+	// grouped by metric families in order to zip families with their help text in
+	// MetricsStore.WriteAll().
+	metrics map[types.UID][]string
+	// headers contains the header (TYPE and HELP) of each metric family.
It is
+	// later on zipped with their corresponding metric families in
+	// MetricsStore.WriteAll().
+	headers []string
+
+	// generateMetricsFunc generates metrics based on a given Kubernetes object
+	// and returns them grouped by metric family.
+	generateMetricsFunc func(interface{}) []FamilyStringer
+}
+
+// NewMetricsStore returns a new MetricsStore
+func NewMetricsStore(headers []string, generateFunc func(interface{}) []FamilyStringer) *MetricsStore {
+	return &MetricsStore{
+		generateMetricsFunc: generateFunc,
+		headers:             headers,
+		metrics:             map[types.UID][]string{},
+	}
+}
+
+// Implementing k8s.io/kubernetes/client-go/tools/cache.Store interface
+
+// TODO: Proper comments on all functions below.
+func (s *MetricsStore) Add(obj interface{}) error {
+	o, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	families := s.generateMetricsFunc(obj)
+	familyStrings := make([]string, len(families))
+
+	for i, f := range families {
+		familyStrings[i] = f.String()
+	}
+
+	s.metrics[o.GetUID()] = familyStrings
+
+	return nil
+}
+
+func (s *MetricsStore) Update(obj interface{}) error {
+	// For now, just call Add, in the future one could check if the resource
+	// version changed?
+	return s.Add(obj)
+}
+
+func (s *MetricsStore) Delete(obj interface{}) error {
+	o, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	delete(s.metrics, o.GetUID())
+
+	return nil
+}
+
+func (s *MetricsStore) List() []interface{} {
+	return nil
+}
+
+func (s *MetricsStore) ListKeys() []string {
+	return nil
+}
+
+func (s *MetricsStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	return nil, false, nil
+}
+
+func (s *MetricsStore) GetByKey(key string) (item interface{}, exists bool, err error) {
+	return nil, false, nil
+}
+
+// Replace will delete the contents of the store, using instead the
+// given list.
+func (s *MetricsStore) Replace(list []interface{}, _ string) error {
+	s.mutex.Lock()
+	s.metrics = map[types.UID][]string{}
+	s.mutex.Unlock()
+
+	for _, o := range list {
+		err := s.Add(o)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *MetricsStore) Resync() error {
+	return nil
+}
+
+// WriteAll writes all metrics of the store into the given writer, zipped with the
+// help text of each metric family.
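+// Headers and families are index-aligned: the header at index i is written
+// once, followed by the i-th family string of every stored object.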
+func (s *MetricsStore) WriteAll(w io.Writer) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for i, help := range s.headers { + w.Write([]byte(help)) + w.Write([]byte{'\n'}) + for _, metricFamilies := range s.metrics { + w.Write([]byte(metricFamilies[i])) + } + } +} diff --git a/pkg/metrics_store/metrics_store_test.go b/pkg/metrics_store/metrics_store_test.go new file mode 100644 index 0000000000..2aed8bfd2b --- /dev/null +++ b/pkg/metrics_store/metrics_store_test.go @@ -0,0 +1,61 @@ +package metricsstore + +import ( + "fmt" + "strings" + "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kube-state-metrics/pkg/metrics" +) + +func TestObjectsSameNameDifferentNamespaces(t *testing.T) { + serviceIDS := []string{"a", "b"} + + genFunc := func(obj interface{}) []FamilyStringer { + o, err := meta.Accessor(obj) + if err != nil { + t.Fatal(err) + } + + metric := metrics.Metric{ + Name: "kube_service_info", + LabelKeys: []string{"uid"}, + LabelValues: []string{string(o.GetUID())}, + Value: 1, + } + metricFamily := metrics.Family{&metric} + + return []FamilyStringer{metricFamily} + } + + ms := NewMetricsStore([]string{"Information about service."}, genFunc) + + for _, id := range serviceIDS { + s := v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service", + Namespace: id, + UID: types.UID(id), + }, + } + + err := ms.Add(&s) + if err != nil { + t.Fatal(err) + } + } + + w := strings.Builder{} + ms.WriteAll(&w) + m := w.String() + + for _, id := range serviceIDS { + if !strings.Contains(m, fmt.Sprintf("uid=\"%v\"", id)) { + t.Fatalf("expected to find metric with uid %v", id) + } + } +} diff --git a/pkg/options/collector.go b/pkg/options/collector.go new file mode 100644 index 0000000000..b00b3f3daa --- /dev/null +++ b/pkg/options/collector.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + DefaultNamespaces = NamespaceList{metav1.NamespaceAll} + DefaultCollectors = CollectorSet{ + "daemonsets": struct{}{}, + "deployments": struct{}{}, + "limitranges": struct{}{}, + "nodes": struct{}{}, + "pods": struct{}{}, + "poddisruptionbudgets": struct{}{}, + "replicasets": struct{}{}, + "replicationcontrollers": struct{}{}, + "resourcequotas": struct{}{}, + "services": struct{}{}, + "jobs": struct{}{}, + "cronjobs": struct{}{}, + "statefulsets": struct{}{}, + "persistentvolumes": struct{}{}, + "persistentvolumeclaims": struct{}{}, + "namespaces": struct{}{}, + "horizontalpodautoscalers": struct{}{}, + "endpoints": struct{}{}, + "secrets": struct{}{}, + "configmaps": struct{}{}, + } +) diff --git a/pkg/options/options.go b/pkg/options/options.go new file mode 100644 index 0000000000..f4b16f790f --- /dev/null +++ b/pkg/options/options.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "flag" + "fmt" + "os" + + "github.com/spf13/pflag" +) + +type Options struct { + Apiserver string + Kubeconfig string + Help bool + Port int + Host string + TelemetryPort int + TelemetryHost string + Collectors CollectorSet + Namespaces NamespaceList + MetricBlacklist MetricSet + MetricWhitelist MetricSet + Version bool + DisablePodNonGenericResourceMetrics bool + DisableNodeNonGenericResourceMetrics bool + + EnableGZIPEncoding bool + + flags *pflag.FlagSet +} + +func NewOptions() *Options { + return &Options{ + Collectors: CollectorSet{}, + MetricWhitelist: MetricSet{}, + MetricBlacklist: MetricSet{}, + } +} + +func (o *Options) AddFlags() { + o.flags = pflag.NewFlagSet("", pflag.ExitOnError) + // add glog flags + o.flags.AddGoFlagSet(flag.CommandLine) + o.flags.Lookup("logtostderr").Value.Set("true") + o.flags.Lookup("logtostderr").DefValue = "true" + o.flags.Lookup("logtostderr").NoOptDefVal = "true" + + o.flags.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + o.flags.PrintDefaults() + } + + o.flags.StringVar(&o.Apiserver, "apiserver", "", `The URL of the apiserver to use as a master`) + o.flags.StringVar(&o.Kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file") + o.flags.BoolVarP(&o.Help, "help", "h", false, "Print Help text") + o.flags.IntVar(&o.Port, "port", 80, `Port to expose metrics on.`) + o.flags.StringVar(&o.Host, "host", "0.0.0.0", `Host to expose metrics on.`) + o.flags.IntVar(&o.TelemetryPort, "telemetry-port", 81, `Port to expose kube-state-metrics self metrics on.`) + o.flags.StringVar(&o.TelemetryHost, "telemetry-host", "0.0.0.0", `Host to expose kube-state-metrics self metrics on.`) + o.flags.Var(&o.Collectors, "collectors", fmt.Sprintf("Comma-separated list of collectors to be enabled. Defaults to %q", &DefaultCollectors)) + o.flags.Var(&o.Namespaces, "namespace", fmt.Sprintf("Comma-separated list of namespaces to be enabled. Defaults to %q", &DefaultNamespaces)) + o.flags.Var(&o.MetricWhitelist, "metric-whitelist", "Comma-separated list of metrics to be exposed. The whitelist and blacklist are mutually exclusive.") + o.flags.Var(&o.MetricBlacklist, "metric-blacklist", "Comma-separated list of metrics not to be enabled. 
The whitelist and blacklist are mutually exclusive.") + o.flags.BoolVarP(&o.Version, "version", "", false, "kube-state-metrics build version information") + o.flags.BoolVarP(&o.DisablePodNonGenericResourceMetrics, "disable-pod-non-generic-resource-metrics", "", false, "Disable pod non generic resource request and limit metrics") + o.flags.BoolVarP(&o.DisableNodeNonGenericResourceMetrics, "disable-node-non-generic-resource-metrics", "", false, "Disable node non generic resource request and limit metrics") + o.flags.BoolVar(&o.EnableGZIPEncoding, "enable-gzip-encoding", false, "Gzip responses when requested by clients via 'Accept-Encoding: gzip' header.") +} + +func (o *Options) Parse() error { + err := o.flags.Parse(os.Args) + return err +} + +func (o *Options) Usage() { + o.flags.Usage() +} diff --git a/pkg/options/options_test.go b/pkg/options/options_test.go new file mode 100644 index 0000000000..f0e63064e1 --- /dev/null +++ b/pkg/options/options_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "os" + "sync" + "testing" + + "github.com/spf13/pflag" +) + +func TestOptionsParse(t *testing.T) { + tests := []struct { + Desc string + Args []string + RecoverInvoked bool + }{ + { + Desc: "collectors command line argument", + Args: []string{"./kube-state-metrics", "--collectors=configmaps,pods"}, + RecoverInvoked: false, + }, + { + Desc: "namespace command line argument", + Args: []string{"./kube-state-metrics", "--namespace=default,kube-system"}, + RecoverInvoked: false, + }, + } + + for _, test := range tests { + var wg sync.WaitGroup + + opts := NewOptions() + opts.AddFlags() + + flags := pflag.NewFlagSet("options_test", pflag.PanicOnError) + flags.AddFlagSet(opts.flags) + + opts.flags = flags + + os.Args = test.Args + + wg.Add(1) + go func() { + defer wg.Done() + defer func() { + if err := recover(); err != nil { + test.RecoverInvoked = true + } + }() + + opts.Parse() + }() + + wg.Wait() + if test.RecoverInvoked { + t.Errorf("Test error for Desc: %s. Test panic", test.Desc) + } + } +} diff --git a/pkg/options/types.go b/pkg/options/types.go new file mode 100644 index 0000000000..d9517fa6eb --- /dev/null +++ b/pkg/options/types.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "sort" + "strings" + + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type MetricSet map[string]struct{} + +func (ms *MetricSet) String() string { + s := *ms + ss := s.asSlice() + sort.Strings(ss) + return strings.Join(ss, ",") +} + +func (ms *MetricSet) Set(value string) error { + s := *ms + metrics := strings.Split(value, ",") + for _, metric := range metrics { + metric = strings.TrimSpace(metric) + if len(metric) != 0 { + s[metric] = struct{}{} + } + } + return nil +} + +func (ms MetricSet) asSlice() []string { + metrics := []string{} + for metric := range ms { + metrics = append(metrics, metric) + } + return metrics +} + +func (ms MetricSet) IsEmpty() bool { + return len(ms.asSlice()) == 0 +} + +func (ms *MetricSet) Type() string { + return "string" +} + +type CollectorSet map[string]struct{} + +func (c *CollectorSet) String() string { + s := *c + ss := s.AsSlice() + sort.Strings(ss) + return strings.Join(ss, ",") +} + +func (c *CollectorSet) Set(value string) error { + s := *c + cols := strings.Split(value, ",") + for _, col := range cols { + col = strings.TrimSpace(col) + if len(col) != 0 { + _, ok := DefaultCollectors[col] + if !ok { + return fmt.Errorf("collector \"%s\" does not exist", col) + } + s[col] = struct{}{} + } + } + return nil +} + +func (c CollectorSet) AsSlice() []string { + cols := []string{} + for col := range c { + cols = append(cols, col) + } + return cols +} + +func (c CollectorSet) isEmpty() bool { + return len(c.AsSlice()) == 0 +} + +func (c *CollectorSet) Type() string { + return "string" +} + +type NamespaceList []string + +func (n *NamespaceList) String() string { + return strings.Join(*n, ",") +} + +func (n *NamespaceList) IsAllNamespaces() bool { + return len(*n) == 1 && (*n)[0] == metav1.NamespaceAll +} + +func (n *NamespaceList) Set(value string) error { + splittedNamespaces := strings.Split(value, ",") + for _, ns := range splittedNamespaces { + ns = strings.TrimSpace(ns) + if len(ns) != 0 { + *n = append(*n, ns) + } + } + return nil +} + +func (n *NamespaceList) Type() string { + return "string" +} diff --git a/pkg/options/types_test.go b/pkg/options/types_test.go new file mode 100644 index 0000000000..f6de0c7db6 --- /dev/null +++ b/pkg/options/types_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package options
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCollectorSetSet(t *testing.T) {
+	tests := []struct {
+		Desc        string
+		Value       string
+		Wanted      CollectorSet
+		WantedError bool
+	}{
+		{
+			Desc:        "empty collectors",
+			Value:       "",
+			Wanted:      CollectorSet{},
+			WantedError: false,
+		},
+		{
+			Desc:  "normal collectors",
+			Value: "configmaps,cronjobs,daemonsets,deployments",
+			Wanted: CollectorSet(map[string]struct{}{
+				"configmaps":  {},
+				"cronjobs":    {},
+				"daemonsets":  {},
+				"deployments": {},
+			}),
+			WantedError: false,
+		},
+		{
+			Desc:        "non-existent collectors",
+			Value:       "none-exists",
+			Wanted:      CollectorSet{},
+			WantedError: true,
+		},
+	}
+
+	for _, test := range tests {
+		cs := &CollectorSet{}
+		gotError := cs.Set(test.Value)
+		if !(((gotError == nil && !test.WantedError) || (gotError != nil && test.WantedError)) && reflect.DeepEqual(*cs, test.Wanted)) {
+			t.Errorf("Test error for Desc: %s. Want: %+v. Got: %+v. Wanted Error: %v, Got Error: %v", test.Desc, test.Wanted, *cs, test.WantedError, gotError)
+		}
+	}
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
new file mode 100644
index 0000000000..cb188122a2
--- /dev/null
+++ b/pkg/version/version.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	// Release returns the release version
+	Release = "UNKNOWN"
+	// Commit returns the short sha from git
+	Commit = "UNKNOWN"
+	// BuildDate is the build date
+	BuildDate = ""
+)
+
+type Version struct {
+	GitCommit string
+	BuildDate string
+	Release   string
+	GoVersion string
+	Compiler  string
+	Platform  string
+}
+
+func (v Version) String() string {
+	return fmt.Sprintf("%s/%s (%s/%s) kube-state-metrics/%s",
+		filepath.Base(os.Args[0]), v.Release,
+		runtime.GOOS, runtime.GOARCH, v.GitCommit)
+}
+
+// GetVersion returns kube-state-metrics version
+func GetVersion() Version {
+	return Version{
+		GitCommit: Commit,
+		BuildDate: BuildDate,
+		Release:   Release,
+		GoVersion: runtime.Version(),
+		Compiler:  runtime.Compiler,
+		Platform:  fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
diff --git a/pkg/whiteblacklist/whiteblacklist.go b/pkg/whiteblacklist/whiteblacklist.go
new file mode 100644
index 0000000000..8509db621c
--- /dev/null
+++ b/pkg/whiteblacklist/whiteblacklist.go
@@ -0,0 +1,107 @@
+package whiteblacklist
+
+import (
+	"errors"
+	"strings"
+)
+
+// WhiteBlackList encapsulates the logic needed to filter based on a string.
+type WhiteBlackList struct {
+	list        map[string]struct{}
+	isWhiteList bool
+}
+
+// New constructs a new WhiteBlackList based on a white- and a
+// blacklist. Only one of them can be non-empty.
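+// For example, New(map[string]struct{}{"kube_pod_info": {}}, nil) returns
+// a whitelist on which IsIncluded("kube_pod_info") is true and every other
+// item is excluded.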
+func New(w, b map[string]struct{}) (*WhiteBlackList, error) {
+	if len(w) != 0 && len(b) != 0 {
+		return nil, errors.New(
+			"whitelist and blacklist are both set, they are mutually exclusive, only one of them can be set",
+		)
+	}
+
+	white := copyList(w)
+	black := copyList(b)
+
+	var list map[string]struct{}
+	var isWhiteList bool
+
+	// Default to blacklisting
+	if len(white) != 0 {
+		list = white
+		isWhiteList = true
+	} else {
+		list = black
+		isWhiteList = false
+	}
+
+	return &WhiteBlackList{
+		list:        list,
+		isWhiteList: isWhiteList,
+	}, nil
+}
+
+// Include includes the given items in the list.
+func (l *WhiteBlackList) Include(items []string) {
+	if l.isWhiteList {
+		for _, item := range items {
+			l.list[item] = struct{}{}
+		}
+	} else {
+		for _, item := range items {
+			delete(l.list, item)
+		}
+	}
+}
+
+// Exclude excludes the given items from the list.
+func (l *WhiteBlackList) Exclude(items []string) {
+	if l.isWhiteList {
+		for _, item := range items {
+			delete(l.list, item)
+		}
+	} else {
+		for _, item := range items {
+			l.list[item] = struct{}{}
+		}
+	}
+}
+
+// IsIncluded returns if the given item is included.
+func (l *WhiteBlackList) IsIncluded(item string) bool {
+	_, exists := l.list[item]
+
+	if l.isWhiteList {
+		return exists
+	}
+
+	return !exists
+}
+
+// IsExcluded returns if the given item is excluded.
+func (l *WhiteBlackList) IsExcluded(item string) bool {
+	return !l.IsIncluded(item)
+}
+
+// Status returns the status of the WhiteBlackList that can e.g. be passed into
+// a logger.
+func (l *WhiteBlackList) Status() string {
+	items := []string{}
+	for key := range l.list {
+		items = append(items, key)
+	}
+
+	if l.isWhiteList {
+		return "whitelisting the following items: " + strings.Join(items, ", ")
+	}
+
+	return "blacklisting the following items: " + strings.Join(items, ", ")
+}
+
+func copyList(l map[string]struct{}) map[string]struct{} {
+	newList := map[string]struct{}{}
+	for k, v := range l {
+		newList[k] = v
+	}
+	return newList
+}
diff --git a/pkg/whiteblacklist/whiteblacklist_test.go b/pkg/whiteblacklist/whiteblacklist_test.go
new file mode 100644
index 0000000000..dfd866f1ac
--- /dev/null
+++ b/pkg/whiteblacklist/whiteblacklist_test.go
@@ -0,0 +1,104 @@
+package whiteblacklist
+
+import (
+	"testing"
+)
+
+func TestNew(t *testing.T) {
+	t.Run("fails with two non empty maps", func(t *testing.T) {
+		_, err := New(map[string]struct{}{"not-empty": struct{}{}}, map[string]struct{}{"not-empty": struct{}{}})
+		if err == nil {
+			t.Fatal("expected New() to fail with two non-empty maps")
+		}
+	})
+
+	t.Run("defaults to blacklisting", func(t *testing.T) {
+		l, err := New(map[string]struct{}{}, map[string]struct{}{})
+		if err != nil {
+			t.Fatal("expected New() to not fail")
+		}
+
+		if l.isWhiteList {
+			t.Fatal("expected whiteBlackList to default to blacklist")
+		}
+	})
+
+	t.Run("if whitelist set, should be whitelist", func(t *testing.T) {
+		list, err := New(map[string]struct{}{"not-empty": struct{}{}}, map[string]struct{}{})
+		if err != nil {
+			t.Fatal("expected New() to not fail")
+		}
+
+		if !list.isWhiteList {
+			t.Fatal("expected list to be whitelist")
+		}
+	})
+
+	t.Run("if blacklist set, should be blacklist", func(t *testing.T) {
+		list, err := New(map[string]struct{}{}, map[string]struct{}{"not-empty": struct{}{}})
+		if err != nil {
+			t.Fatal("expected New() to not fail")
+		}
+
+		if list.isWhiteList {
+			t.Fatal("expected list to be blacklist")
+		}
+	})
+}
+
+func TestInclude(t *testing.T) {
+	t.Run("adds when whitelist", func(t *testing.T) {
whitelist, err := New(map[string]struct{}{"not-empty": struct{}{}}, map[string]struct{}{}) + if err != nil { + t.Fatal("expected New() to not fail") + } + + whitelist.Include([]string{"item1"}) + + if !whitelist.IsIncluded("item1") { + t.Fatal("expected included item to be included") + } + }) + t.Run("removes when blacklist", func(t *testing.T) { + item1 := "item1" + blacklist, err := New(map[string]struct{}{}, map[string]struct{}{item1: struct{}{}}) + if err != nil { + t.Fatal("expected New() to not fail") + } + + blacklist.Include([]string{item1}) + + if !blacklist.IsIncluded(item1) { + t.Fatal("expected included item to be included") + } + }) +} + +func TestExclude(t *testing.T) { + t.Run("removes when whitelist", func(t *testing.T) { + item1 := "item1" + whitelist, err := New(map[string]struct{}{item1: struct{}{}}, map[string]struct{}{}) + if err != nil { + t.Fatal("expected New() to not fail") + } + + whitelist.Exclude([]string{item1}) + + if whitelist.IsIncluded(item1) { + t.Fatal("expected excluded item to be excluded") + } + }) + t.Run("removes when blacklist", func(t *testing.T) { + item1 := "item1" + blacklist, err := New(map[string]struct{}{}, map[string]struct{}{"not-empty": struct{}{}}) + if err != nil { + t.Fatal("expected New() to not fail") + } + + blacklist.Exclude([]string{item1}) + + if blacklist.IsIncluded(item1) { + t.Fatal("expected excluded item to be excluded") + } + }) +} diff --git a/scripts/e2e.sh b/scripts/e2e.sh deleted file mode 100755 index b1bb1534f4..0000000000 --- a/scripts/e2e.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -set -o pipefail - -KUBERNETES_VERSION=v1.8.0 -KUBE_STATE_METRICS_LOG_DIR=./log -KUBE_STATE_METRICS_IMAGE_NAME='quay.io/coreos/kube-state-metrics' -KUBE_STATE_METRICS_IMAGE_NAME_PATTERN='quay.io\/coreos\/kube-state-metrics' -PROMETHEUS_VERSION=2.0.0 - -mkdir -p $KUBE_STATE_METRICS_LOG_DIR - -# setup a Kubernetes cluster -curl -sLo minikube https://storage.googleapis.com/minikube/releases/v0.25.2/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ - -minikube version - -curl -sLo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/ - -export MINIKUBE_WANTUPDATENOTIFICATION=false -export MINIKUBE_WANTREPORTERRORPROMPT=false -export MINIKUBE_HOME=$HOME -export CHANGE_MINIKUBE_NONE_USER=true -mkdir $HOME/.kube || true -touch $HOME/.kube/config - -export KUBECONFIG=$HOME/.kube/config -sudo minikube start --vm-driver=none --kubernetes-version=$KUBERNETES_VERSION --logtostderr - -minikube update-context - -set +e - -is_kube_running="false" - -# this for loop waits until kubectl can access the api server that Minikube has created -for i in {1..90}; do # timeout for 3 minutes - kubectl get po 1>/dev/null 2>&1 - if [ $? 
-ne 1 ]; then - is_kube_running="true" - break - fi - - echo "waiting for Kubernetes cluster up" - sleep 2 -done - -if [ $is_kube_running == "false" ]; then - minikube logs - echo "Kubernetes does not start within 3 minutes" - exit 1 -fi - -set -e - -kubectl version - -# query kube-state-metrics image tag -make container -docker images -a -ksm_image_tag=`docker images -a|grep 'quay.io/coreos/kube-state-metrics'|awk '{print $2}'|sort -u` -echo "local kube-state-metrics image tag: $ksm_image_tag" - -# update kube-state-metrics image tag in kube-state-metrics-deployment.yaml -sed -i.bak "s/$KUBE_STATE_METRICS_IMAGE_NAME_PATTERN:v.*/$KUBE_STATE_METRICS_IMAGE_NAME_PATTERN:$ksm_image_tag/g" ./kubernetes/kube-state-metrics-deployment.yaml -cat ./kubernetes/kube-state-metrics-deployment.yaml - -# set up kube-state-metrics manifests -kubectl create -f ./kubernetes/kube-state-metrics-service-account.yaml - -kubectl create -f ./kubernetes/kube-state-metrics-cluster-role.yaml -kubectl create -f ./kubernetes/kube-state-metrics-cluster-role-binding.yaml - -kubectl create -f ./kubernetes/kube-state-metrics-role-binding.yaml -kubectl create -f ./kubernetes/kube-state-metrics-role.yaml - -kubectl create -f ./kubernetes/kube-state-metrics-deployment.yaml - -kubectl create -f ./kubernetes/kube-state-metrics-service.yaml - -echo "make requests to kube-state-metrics" - -set +e - -is_kube_state_metrics_running="false" - -kubectl proxy & - -# this for loop waits until kube-state-metrics is running by accessing the healthz endpoint -for i in {1..30}; do # timeout for 1 minutes - KUBE_STATE_METRICS_STATUS=$(curl -s "http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kube-state-metrics:8080/healthz") - if [ "$KUBE_STATE_METRICS_STATUS" == "ok" ]; then - is_kube_state_metrics_running="true" - break - fi - - echo "waiting for Kube-state-metrics up" - sleep 2 -done - -if [ $is_kube_state_metrics_running != "true" ]; then - kubectl --namespace=kube-system logs deployment/kube-state-metrics kube-state-metrics - echo "kube-state-metrics does not start within 1 minute" - exit 1 -fi - -set -e - -echo "kube-state-metrics is up and running" - -echo "access kube-state-metrics metrics endpoint" -curl -s "http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kube-state-metrics:8080/metrics" >$KUBE_STATE_METRICS_LOG_DIR/metrics - -echo "check metrics format with promtool" -wget -q -O /tmp/prometheus.tar.gz https://github.com/prometheus/prometheus/releases/download/v$PROMETHEUS_VERSION/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz -tar zxfv /tmp/prometheus.tar.gz -C /tmp -cat $KUBE_STATE_METRICS_LOG_DIR/metrics | /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64/promtool check metrics - -KUBE_STATE_METRICS_STATUS=$(curl -s "http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kube-state-metrics:8080/healthz") -if [ "$KUBE_STATE_METRICS_STATUS" == "ok" ]; then - echo "kube-state-metrics is still running after accessing metrics endpoint" - exit 0 -fi - -# wait for glog to flush to log file -sleep 33 -kubectl --namespace=kube-system logs deployment/kube-state-metrics kube-state-metrics -exit 1 diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000..47ae944672 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,19 @@ +# End2end testsuite + +This folder contains simple e2e tests. +When launched it spins up a kubernetes cluster using minikube, creates several kubernetes resources and launches a kube-state-metrics deployment. 
+Then, it downloads kube-state-metrics' metrics and validates them using the `promtool` tool.
+
+The testsuite is run automatically using Travis.
+
+## Running locally
+
+In case you need to run the e2e tests manually on your local machine, you can configure the `e2e.sh` script with a few environment variables.
+
+```bash
+export E2E_SETUP_MINIKUBE= # set to an empty string if you already have your own minikube binary; prevents downloading one
+export E2E_SETUP_KUBECTL= # set to an empty string if you already have your own kubectl binary; prevents downloading one
+export MINIKUBE_DRIVER=virtualbox # choose the minikube driver of your choice
+export SUDO= # set to an empty string if you don't need sudo; defaults to `sudo`
+./tests/e2e.sh
+```
diff --git a/tests/e2e.sh b/tests/e2e.sh
new file mode 100755
index 0000000000..80de57cb22
--- /dev/null
+++ b/tests/e2e.sh
@@ -0,0 +1,187 @@
+#!/bin/bash

+# Copyright 2017 The Kubernetes Authors All rights reserved.

+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+set -e
+set -o pipefail

+KUBERNETES_VERSION=v1.10.0
+KUBE_STATE_METRICS_LOG_DIR=./log
+KUBE_STATE_METRICS_IMAGE_NAME='quay.io/coreos/kube-state-metrics'
+PROMETHEUS_VERSION=2.0.0
+E2E_SETUP_MINIKUBE=${E2E_SETUP_MINIKUBE-yes}
+E2E_SETUP_KUBECTL=${E2E_SETUP_KUBECTL-yes}
+E2E_SETUP_PROMTOOL=${E2E_SETUP_PROMTOOL-yes}
+MINIKUBE_DRIVER=${MINIKUBE_DRIVER:-none}
+SUDO=${SUDO-sudo}

+mkdir -p $KUBE_STATE_METRICS_LOG_DIR

+function finish() {
+    echo "calling cleanup function"
+    # kill kubectl proxy in background
+    kill %1 || true
+    kubectl delete -f kubernetes/ || true
+    kubectl delete -f tests/manifests/ || true
+}

+function setup_minikube() {
+    curl -sLo minikube https://storage.googleapis.com/minikube/releases/v0.25.2/minikube-linux-amd64 \
+        && chmod +x minikube \
+        && $SUDO mv minikube /usr/local/bin/
+}

+function setup_kubectl() {
+    curl -sLo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
+        && chmod +x kubectl \
+        && $SUDO mv kubectl /usr/local/bin/
+}

+function setup_promtool() {
+    wget -q -O /tmp/prometheus.tar.gz https://github.com/prometheus/prometheus/releases/download/v$PROMETHEUS_VERSION/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz
+    tar zxfv /tmp/prometheus.tar.gz -C /tmp/ prometheus-$PROMETHEUS_VERSION.linux-amd64/promtool
+    $SUDO mv /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64/promtool /usr/local/bin/
+    rmdir /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64
+    rm /tmp/prometheus.tar.gz
+}

+[ -n "$E2E_SETUP_MINIKUBE" ] && setup_minikube

+minikube version

+[ -n "$E2E_SETUP_KUBECTL" ] && setup_kubectl

+export MINIKUBE_WANTUPDATENOTIFICATION=false
+export MINIKUBE_WANTREPORTERRORPROMPT=false
+export MINIKUBE_HOME=$HOME
+export CHANGE_MINIKUBE_NONE_USER=true
+mkdir $HOME/.kube || true
+touch $HOME/.kube/config

+export KUBECONFIG=$HOME/.kube/config
+$SUDO minikube start --vm-driver=$MINIKUBE_DRIVER --kubernetes-version=$KUBERNETES_VERSION 
--logtostderr + +minikube update-context + +set +e + +is_kube_running="false" + +# this for loop waits until kubectl can access the api server that Minikube has created +for i in {1..90}; do # timeout for 3 minutes + kubectl get po 1>/dev/null 2>&1 + if [ $? -ne 1 ]; then + is_kube_running="true" + break + fi + + echo "waiting for Kubernetes cluster up" + sleep 2 +done + +if [ $is_kube_running == "false" ]; then + minikube logs + echo "Kubernetes does not start within 3 minutes" + exit 1 +fi + +set -e + +kubectl version + +# ensure that we build docker image in minikube +[ "$MINIKUBE_DRIVER" != "none" ] && eval $(minikube docker-env) + +# query kube-state-metrics image tag +make container +docker images -a +ksm_image_tag=`docker images -a|grep 'quay.io/coreos/kube-state-metrics'|grep -v 'latest'|awk '{print $2}'|sort -u` +echo "local kube-state-metrics image tag: $ksm_image_tag" + +# update kube-state-metrics image tag in kube-state-metrics-deployment.yaml +sed -i.bak "s|$KUBE_STATE_METRICS_IMAGE_NAME:v.*|$KUBE_STATE_METRICS_IMAGE_NAME:$ksm_image_tag|g" ./kubernetes/kube-state-metrics-deployment.yaml +cat ./kubernetes/kube-state-metrics-deployment.yaml + +trap finish EXIT + +# set up kube-state-metrics manifests +kubectl create -f ./kubernetes/kube-state-metrics-service-account.yaml + +kubectl create -f ./kubernetes/kube-state-metrics-cluster-role.yaml +kubectl create -f ./kubernetes/kube-state-metrics-cluster-role-binding.yaml + +kubectl create -f ./kubernetes/kube-state-metrics-role-binding.yaml +kubectl create -f ./kubernetes/kube-state-metrics-role.yaml + +kubectl create -f ./kubernetes/kube-state-metrics-deployment.yaml + +kubectl create -f ./kubernetes/kube-state-metrics-service.yaml + +kubectl create -f ./tests/manifests/ + +echo "make requests to kube-state-metrics" + +set +e + +is_kube_state_metrics_running="false" + +kubectl proxy & + +# this for loop waits until kube-state-metrics is running by accessing the healthz endpoint +for i in {1..30}; do # timeout for 1 minutes + KUBE_STATE_METRICS_STATUS=$(curl -s "http://localhost:8001/api/v1/namespaces/kube-system/services/kube-state-metrics:http-metrics/proxy/healthz") + if [ "$KUBE_STATE_METRICS_STATUS" == "ok" ]; then + is_kube_state_metrics_running="true" + break + fi + + echo "waiting for Kube-state-metrics up" + sleep 2 +done + +if [ $is_kube_state_metrics_running != "true" ]; then + kubectl --namespace=kube-system logs deployment/kube-state-metrics kube-state-metrics + echo "kube-state-metrics does not start within 1 minute" + exit 1 +fi + +set -e + +echo "kube-state-metrics is up and running" + +echo "access kube-state-metrics metrics endpoint" +curl -s "http://localhost:8001/api/v1/namespaces/kube-system/services/kube-state-metrics:http-metrics/proxy/metrics" >$KUBE_STATE_METRICS_LOG_DIR/metrics + +echo "check metrics format with promtool" +[ -n "$E2E_SETUP_PROMTOOL" ] && setup_promtool +cat $KUBE_STATE_METRICS_LOG_DIR/metrics | promtool check metrics + +collectors=$(find pkg/collectors/ -maxdepth 1 -name "*.go" -not -name "*_test.go" -not -name "collectors.go" -not -name "builder.go" -not -name "testutils.go" -not -name "utils.go" | xargs -n1 basename | awk -F. 
+echo "available collectors: $collectors"
+for collector in $collectors; do
+    echo "checking that kube_${collector}* metrics exist"
+    grep "^kube_${collector}_" $KUBE_STATE_METRICS_LOG_DIR/metrics
+done
+
+KUBE_STATE_METRICS_STATUS=$(curl -s "http://localhost:8001/api/v1/namespaces/kube-system/services/kube-state-metrics:http-metrics/proxy/healthz")
+if [ "$KUBE_STATE_METRICS_STATUS" == "ok" ]; then
+    echo "kube-state-metrics is still running after accessing the metrics endpoint"
+    exit 0
+fi
+
+# wait for glog to flush to the log file; glog flushes its buffers roughly every 30 seconds
+sleep 33
+kubectl --namespace=kube-system logs deployment/kube-state-metrics kube-state-metrics
+exit 1
diff --git a/tests/lib/doc.go b/tests/lib/doc.go
new file mode 100644
index 0000000000..e1c49c36d7
--- /dev/null
+++ b/tests/lib/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package lib enables other projects to reuse the performance-optimized metric
+exposition logic. While this package ensures this use-case is possible, it does
+not provide any stability guarantees for the interface.
+*/
+package lib
diff --git a/tests/lib/lib_test.go b/tests/lib/lib_test.go
new file mode 100644
index 0000000000..214633fd89
--- /dev/null
+++ b/tests/lib/lib_test.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2018 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lib
+
+import (
+	"context"
+	"strings"
+	"testing"
+	"time"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/kube-state-metrics/pkg/collectors"
+	"k8s.io/kube-state-metrics/pkg/metrics"
+	metricsstore "k8s.io/kube-state-metrics/pkg/metrics_store"
+)
+
+func TestAsLibrary(t *testing.T) {
+	kubeClient := fake.NewSimpleClientset()
+
+	service := v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "my-service",
+			ResourceVersion: "123456",
+		},
+	}
+
+	_, err := kubeClient.CoreV1().Services(metav1.NamespaceDefault).Create(&service)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c := serviceCollector(kubeClient)
+
+	// Wait for informers to sync
+	time.Sleep(time.Second)
+
+	w := strings.Builder{}
+	c.Collect(&w)
+	m := w.String()
+
+	if !strings.Contains(m, service.ObjectMeta.Name) {
+		t.Fatal("expected string to contain service name")
+	}
+}
+
+func serviceCollector(kubeClient clientset.Interface) *collectors.Collector {
+	store := metricsstore.NewMetricsStore([]string{"test_metric describes a test metric"}, generateServiceMetrics)
+
+	lw := cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			return kubeClient.CoreV1().Services(metav1.NamespaceDefault).List(opts)
+		},
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			return kubeClient.CoreV1().Services(metav1.NamespaceDefault).Watch(opts)
+		},
+	}
+
+	r := cache.NewReflector(&lw, &v1.Service{}, store, 0)
+
+	go r.Run(context.TODO().Done())
+
+	return collectors.NewCollector(store)
+}
+
+func generateServiceMetrics(obj interface{}) []metricsstore.FamilyStringer {
+	sPointer := obj.(*v1.Service)
+	s := *sPointer
+
+	m := metrics.Metric{
+		Name:        "test_metric",
+		LabelKeys:   []string{"name"},
+		LabelValues: []string{s.Name},
+		Value:       1,
+	}
+
+	family := metrics.Family{&m}
+
+	return []metricsstore.FamilyStringer{family}
+}
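The test above doubles as a usage example for the library interface. As a minimal sketch of how a third-party binary might wire the same pieces together and expose the rendered metrics over HTTP: this is illustrative only, the `/metrics` path and port are arbitrary choices, `serviceCollector` refers to the helper defined in the test above rather than any official entry point, and the library gives no stability guarantees for these interfaces.

```go
package main

import (
	"log"
	"net/http"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the binary runs inside a cluster; an out-of-cluster setup
	// would build the config via clientcmd instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// serviceCollector is the hypothetical helper from lib_test.go above:
	// a reflector feeding a MetricsStore wrapped in a collectors.Collector.
	c := serviceCollector(kubeClient)

	// On every scrape, render the cached metrics straight into the response.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		c.Collect(w)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Because the reflector keeps the store up to date in the background, each scrape is a cheap read of pre-rendered metric strings, which is the performance property this release's architectural change is built around.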
diff --git a/tests/manifests/cronjob.yaml b/tests/manifests/cronjob.yaml
new file mode 100644
index 0000000000..d08da4f458
--- /dev/null
+++ b/tests/manifests/cronjob.yaml
@@ -0,0 +1,15 @@
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+  name: cronjob
+  namespace: default
+spec:
+  schedule: "@hourly"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: cronjob
+            image: nonexisting
+          restartPolicy: OnFailure
diff --git a/tests/manifests/daemonset.yaml b/tests/manifests/daemonset.yaml
new file mode 100644
index 0000000000..c4141e273c
--- /dev/null
+++ b/tests/manifests/daemonset.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  name: daemonset
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      name: daemonset
+  template:
+    metadata:
+      labels:
+        name: daemonset
+    spec:
+      containers:
+      - name: daemonset
+        image: nonexisting
diff --git a/tests/manifests/hpa.yaml b/tests/manifests/hpa.yaml
new file mode 100644
index 0000000000..9ecff384b0
--- /dev/null
+++ b/tests/manifests/hpa.yaml
@@ -0,0 +1,13 @@
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: hpa
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1beta1
+    kind: Deployment
+    name: deployment
+  minReplicas: 1
+  maxReplicas: 10
+  targetCPUUtilizationPercentage: 50
diff --git a/tests/manifests/job.yaml b/tests/manifests/job.yaml
new file mode 100644
index 0000000000..7f74e75897
--- /dev/null
+++ 
b/tests/manifests/job.yaml @@ -0,0 +1,14 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: job + namespace: default +spec: + template: + metadata: + name: job + spec: + containers: + - name: job + image: nonexisting + restartPolicy: Never diff --git a/tests/manifests/limitrange.yaml b/tests/manifests/limitrange.yaml new file mode 100644 index 0000000000..7fb589488b --- /dev/null +++ b/tests/manifests/limitrange.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limitrange +spec: + limits: + - default: + memory: 1000Mi + defaultRequest: + memory: 1Mi + type: Container diff --git a/tests/manifests/persistentvolume.yaml b/tests/manifests/persistentvolume.yaml new file mode 100644 index 0000000000..eccb89f39f --- /dev/null +++ b/tests/manifests/persistentvolume.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: persistentvolume +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 5Gi + hostPath: + path: /data/pv0001/ diff --git a/tests/manifests/persistentvolumeclaim.yaml b/tests/manifests/persistentvolumeclaim.yaml new file mode 100644 index 0000000000..565e4cfa55 --- /dev/null +++ b/tests/manifests/persistentvolumeclaim.yaml @@ -0,0 +1,16 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: persistentvolumeclaim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: slow + selector: + matchLabels: + release: "stable" + matchExpressions: + - {key: environment, operator: In, values: [dev]} diff --git a/tests/manifests/poddisruptionbudget.yaml b/tests/manifests/poddisruptionbudget.yaml new file mode 100644 index 0000000000..cb980b7b83 --- /dev/null +++ b/tests/manifests/poddisruptionbudget.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: pdb +spec: + minAvailable: "50%" + selector: + matchLabels: + name: pdb diff --git a/tests/manifests/replicationcontroller.yaml b/tests/manifests/replicationcontroller.yaml new file mode 100644 index 0000000000..0aaecdbd8a --- /dev/null +++ b/tests/manifests/replicationcontroller.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: replicationcontroller +spec: + selector: + app: replicationcontroller + template: + metadata: + name: replicationcontroller + labels: + app: replicationcontroller + spec: + containers: + - name: replicationcontroller + image: nonexisting diff --git a/tests/manifests/resourcequota.yaml b/tests/manifests/resourcequota.yaml new file mode 100644 index 0000000000..b1a5edcaf3 --- /dev/null +++ b/tests/manifests/resourcequota.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: resourcequota +spec: + hard: + configmaps: "10" diff --git a/tests/manifests/statefulset.yaml b/tests/manifests/statefulset.yaml new file mode 100644 index 0000000000..4d5451b889 --- /dev/null +++ b/tests/manifests/statefulset.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: statefulset +spec: + selector: + matchLabels: + app: statefulset + serviceName: statefulset + template: + metadata: + labels: + app: statefulset + spec: + containers: + - name: statefulset + image: nonexisting diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000000..c364af1da0 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. 
+# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000000..07509ccb7c --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,34 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +Johan Euphrosine +Jonathan Amsterdam +Luna Duclos +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 0000000000..a4c5efd822 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000000..5c6f3bf382 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,438 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. 
+// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" + + "cloud.google.com/go/internal" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }, + } + subscribeClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. +func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. 
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance.
The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 0000000000..8e0c8f8e52 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. 
+ Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 0000000000..b9d6a27ea9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 0000000000..7b0c4bc4d2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. 
Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate containing also the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content form `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending of your needs. + +``` +az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. + +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + oauthConfig, + appliationID, + applicationSecret, + resource, + callbacks...) 
+if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
+ +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 0000000000..f570d540a6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,81 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. 
+func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + // it's legal for tenantID to be empty so don't validate it + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 0000000000..b38f4c2458 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,242 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+ This file is largely based on rjw57/oauth2device's code, with the following differences:
+ * scope -> resource, and only allow a single one
+ * receive "Message" in the DeviceCode struct and show it to users as the prompt
+ * azure-xplat-cli has the following behavior that this emulates:
+ - does not send client_secret during the token exchange
+ - sends resource again in the token exchange request
+*/
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+ // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+ ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+ // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+ ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+ // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+ ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+ // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+ ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+ // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+ ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+ // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+ ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+ // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+ ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+ errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
+ errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
+ errTokenSendingFails = "Error occurred while sending request with device code for a token"
+ errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+ errStatusNotOK = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+ DeviceCode *string `json:"device_code,omitempty"`
+ UserCode *string `json:"user_code,omitempty"`
+ VerificationURL *string `json:"verification_url,omitempty"`
+ ExpiresIn *int64 `json:"expires_in,string,omitempty"`
+ Interval *int64 `json:"interval,string,omitempty"`
+
+ Message *string `json:"message"` // Azure specific
+ Resource string // the following fields are stored when the flow is initiated and used when exchanging the code for a token
+ OAuthConfig OAuthConfig
+ ClientID string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+ Error *string `json:"error,omitempty"`
+ ErrorCodes []int `json:"error_codes,omitempty"`
+ ErrorDescription *string `json:"error_description,omitempty"`
+ Timestamp *string `json:"timestamp,omitempty"`
+ TraceID *string `json:"trace_id,omitempty"`
+}
+
+// DeviceToken is the object returned by the token exchange endpoint
+//
It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + 
return nil, ErrDeviceCodeExpired
+ default:
+ return nil, ErrDeviceGeneric
+ }
+}
+
+// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
+// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+ intervalDuration := time.Duration(*code.Interval) * time.Second
+ waitDuration := intervalDuration
+
+ for {
+ token, err := CheckForUserCompletion(sender, code)
+
+ if err == nil {
+ return token, nil
+ }
+
+ switch err {
+ case ErrDeviceSlowDown:
+ waitDuration += waitDuration
+ case ErrDeviceAuthorizationPending:
+ // noop
+ default: // everything else is "fatal" to us
+ return nil, err
+ }
+
+ if waitDuration > (intervalDuration * 3) {
+ return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
+ }
+
+ time.Sleep(waitDuration)
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
new file mode 100644
index 0000000000..9e15f2751f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -0,0 +1,73 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ }
+ defer file.Close()
+
+ var token Token
+
+ dec := json.NewDecoder(file)
+ if err = dec.Decode(&token); err != nil {
+ return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+ }
+ return &token, nil
+}
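A sketch of the device-code flow from devicetoken.go above, persisted with `SaveToken` (defined next); the application ID, tenant, resource, and cache path are placeholders:

```
package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/Azure/go-autorest/autorest/adal"
)

func main() {
    config, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "TENANT_ID")
    if err != nil {
        log.Fatal(err)
    }
    sender := &http.Client{} // http.Client satisfies the Sender interface
    code, err := adal.InitiateDeviceAuth(sender, *config, "APPLICATION_ID", "https://management.core.windows.net/")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(*code.Message) // tells the user where to enter the user code
    token, err := adal.WaitForUserCompletion(sender, code)
    if err != nil {
        log.Fatal(err)
    }
    // Persist with owner-only permissions; the path is a placeholder.
    if err := adal.SaveToken("/tmp/accessToken.json", 0600, *token); err != nil {
        log.Fatal(err)
    }
}
```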
+
+// SaveToken persists an OAuth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+ dir := filepath.Dir(path)
+ err := os.MkdirAll(dir, os.ModePerm)
+ if err != nil {
+ return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+ }
+
+ newFile, err := ioutil.TempFile(dir, "token")
+ if err != nil {
+ return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+ }
+ tempPath := newFile.Name()
+
+ if err := json.NewEncoder(newFile).Encode(token); err != nil {
+ return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+ }
+ if err := newFile.Close(); err != nil {
+ return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+ }
+
+ // Atomic replace to avoid multi-writer file corruptions
+ if err := os.Rename(tempPath, path); err != nil {
+ return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+ }
+ if err := os.Chmod(path, mode); err != nil {
+ return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 0000000000..0e5ad14d39
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,60 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "net/http"
+)
+
+const (
+ contentType = "Content-Type"
+ mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+ return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+ return DecorateSender(&http.Client{}, decorators...)
+}
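The `SendDecorator` machinery above composes like ordinary HTTP middleware. A sketch of a hypothetical request-logging decorator (not part of the package):

```
package main

import (
    "log"
    "net/http"

    "github.com/Azure/go-autorest/autorest/adal"
)

// withLogging is a hypothetical SendDecorator that logs each request
// before delegating to the wrapped Sender.
func withLogging(s adal.Sender) adal.Sender {
    return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
        log.Printf("sending %s %s", r.Method, r.URL)
        return s.Do(r)
    })
}

func main() {
    sender := adal.CreateSender(withLogging)
    req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
    if err != nil {
        log.Fatal(err)
    }
    resp, err := sender.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
}
```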
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+ for _, decorate := range decorators {
+ s = decorate(s)
+ }
+ return s
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 0000000000..24641b621e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,762 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/dgrijalva/jwt-go"
+)
+
+const (
+ defaultRefresh = 5 * time.Minute
+
+ // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+ OAuthGrantTypeDeviceCode = "device_code"
+
+ // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+ OAuthGrantTypeClientCredentials = "client_credentials"
+
+ // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+ OAuthGrantTypeUserPass = "password"
+
+ // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+ OAuthGrantTypeRefreshToken = "refresh_token"
+
+ // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+ OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+ // metadataHeader is the header required by MSI extension
+ metadataHeader = "Metadata"
+
+ // msiEndpoint is the well known endpoint for getting MSI authentication tokens
+ msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+ OAuthToken() string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+ error
+ Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+ Refresh() error
+ RefreshExchange(resource string) error
+ EnsureFresh() error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+type Token struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+
+ ExpiresIn string `json:"expires_in"`
+ ExpiresOn string `json:"expires_on"`
+ NotBefore string `json:"not_before"`
+
+ Resource string `json:"resource"`
+ Type string `json:"token_type"`
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+ return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+ s, err := strconv.Atoi(t.ExpiresOn)
+ if err != nil {
+ s = -3600
+ }
+
+ expiration := date.NewUnixTimeFromSeconds(float64(s))
+
+ return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+ return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+ return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+ return t.AccessToken
+}
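A short sketch of how the `Token` accessors above behave, using a hand-built token; real tokens come from the token endpoint or `LoadToken`:

```
package main

import (
    "fmt"
    "strconv"
    "time"

    "github.com/Azure/go-autorest/autorest/adal"
)

func main() {
    // A token that expires ten minutes from now; ExpiresOn holds Unix seconds as a string.
    tok := adal.Token{
        AccessToken: "opaque",
        ExpiresOn:   strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10),
    }
    fmt.Println(tok.IsExpired())                    // false
    fmt.Println(tok.WillExpireIn(5 * time.Minute))  // false
    fmt.Println(tok.WillExpireIn(15 * time.Minute)) // true
}
```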
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. It is used by manually
+// created tokens (see NewServicePrincipalTokenFromManualToken).
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an OAuth token.
+type ServicePrincipalSecret interface {
+ SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct {
+ ClientSecret string
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during OAuth token acquisition using the client_secret.
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("client_secret", tokenSecret.ClientSecret)
+ return nil
+}
+
+// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
+type ServicePrincipalCertificateSecret struct {
+ Certificate *x509.Certificate
+ PrivateKey *rsa.PrivateKey
+}
+
+// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
+type ServicePrincipalMSISecret struct {
+}
+
+// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
+type ServicePrincipalUsernamePasswordSecret struct {
+ Username string
+ Password string
+}
+
+// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
+type ServicePrincipalAuthorizationCodeSecret struct {
+ ClientSecret string
+ AuthorizationCode string
+ RedirectURI string
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("code", secret.AuthorizationCode)
+ v.Set("client_secret", secret.ClientSecret)
+ v.Set("redirect_uri", secret.RedirectURI)
+ return nil
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ v.Set("username", secret.Username)
+ v.Set("password", secret.Password)
+ return nil
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ return nil
+}
+
+// SignJwt returns the JWT signed with the certificate's private key.
+func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
+ hasher := sha1.New()
+ _, err := hasher.Write(secret.Certificate.Raw)
+ if err != nil {
+ return "", err
+ }
+
+ thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
+
+ // The jti (JWT ID) claim provides a unique identifier for the JWT.
+ jti := make([]byte, 20)
+ _, err = rand.Read(jti)
+ if err != nil {
+ return "", err
+ }
+
+ token := jwt.New(jwt.SigningMethodRS256)
+ token.Header["x5t"] = thumbprint
+ token.Claims = jwt.MapClaims{
+ "aud": spt.oauthConfig.TokenEndpoint.String(),
+ "iss": spt.clientID,
+ "sub": spt.clientID,
+ "jti": base64.URLEncoding.EncodeToString(jti),
+ "nbf": time.Now().Unix(),
+ "exp": time.Now().Add(time.Hour * 24).Unix(),
+ }
+
+ signedString, err := token.SignedString(secret.PrivateKey)
+ return signedString, err
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during OAuth token acquisition using a JWT signed with a certificate.
+func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+ jwt, err := secret.SignJwt(spt)
+ if err != nil {
+ return err
+ }
+
+ v.Set("client_assertion", jwt)
+ v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
+ return nil
+}
+
+// ServicePrincipalToken encapsulates a Token created for a Service Principal.
+type ServicePrincipalToken struct {
+ token Token
+ secret ServicePrincipalSecret
+ oauthConfig OAuthConfig
+ clientID string
+ resource string
+ autoRefresh bool
+ refreshLock *sync.RWMutex
+ refreshWithin time.Duration
+ sender Sender
+
+ refreshCallbacks []TokenRefreshCallback
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+ if oac.IsZero() {
+ return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+ }
+ return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(id, "id"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if secret == nil {
+ return nil, fmt.Errorf("parameter 'secret' cannot be nil")
+ }
+ spt := &ServicePrincipalToken{
+ oauthConfig: oauthConfig,
+ secret: secret,
+ clientID: id,
+ resource: resource,
+ autoRefresh: true,
+ refreshLock: &sync.RWMutex{},
+ refreshWithin: defaultRefresh,
+ sender: &http.Client{},
+ refreshCallbacks: callbacks,
+ }
+ return spt, nil
+}
+
+// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
+func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if token.IsZero() {
+ return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
+ }
+ spt, err := NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalNoSecret{},
+ callbacks...)
+ if err != nil {
+ return nil, err
+ }
+
+ spt.token = token
+
+ return spt, nil
+}
+
+// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
+// credentials scoped to the named resource.
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(secret, "secret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalTokenSecret{
+ ClientSecret: secret,
+ },
+ callbacks...,
+ )
+}
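A client-credentials sketch using `NewServicePrincipalToken` above; `EnsureFresh` and `OAuthToken` appear further below, and the quoted values are placeholders:

```
package main

import (
    "fmt"
    "log"

    "github.com/Azure/go-autorest/autorest/adal"
)

func main() {
    config, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "TENANT_ID")
    if err != nil {
        log.Fatal(err)
    }
    spt, err := adal.NewServicePrincipalToken(*config, "APPLICATION_ID", "APPLICATION_SECRET", "https://management.core.windows.net/")
    if err != nil {
        log.Fatal(err)
    }
    // A zero-valued token is treated as expired, so this performs the
    // initial acquisition as well as later refreshes.
    if err := spt.EnsureFresh(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(spt.OAuthToken())
}
```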
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate
+// and RSA private key (typically extracted from a PKCS#12/PFX file).
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if certificate == nil {
+ return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+ }
+ if privateKey == nil {
+ return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalCertificateSecret{
+ PrivateKey: privateKey,
+ Certificate: certificate,
+ },
+ callbacks...,
+ )
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(username, "username"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(password, "password"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalUsernamePasswordSecret{
+ Username: username,
+ Password: password,
+ },
+ callbacks...,
+ )
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+ if err := validateOAuthConfig(oauthConfig); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+
+ return NewServicePrincipalTokenWithSecret(
+ oauthConfig,
+ clientID,
+ resource,
+ &ServicePrincipalAuthorizationCodeSecret{
+ ClientSecret: clientSecret,
+ AuthorizationCode: authorizationCode,
+ RedirectURI: redirectURI,
+ },
+ callbacks...,
+ )
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+ return msiEndpoint, nil
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ if userAssignedID != nil {
+ if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+ return nil, err
+ }
+ }
+ // We set the oauth config token endpoint to be MSI's endpoint
+ msiEndpointURL, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ v := url.Values{}
+ v.Set("resource", resource)
+ v.Set("api-version", "2018-02-01")
+ if userAssignedID != nil {
+ v.Set("client_id", *userAssignedID)
+ }
+ msiEndpointURL.RawQuery = v.Encode()
+
+ spt := &ServicePrincipalToken{
+ oauthConfig: OAuthConfig{
+ TokenEndpoint: *msiEndpointURL,
+ },
+ secret: &ServicePrincipalMSISecret{},
+ resource: resource,
+ autoRefresh: true,
+ refreshLock: &sync.RWMutex{},
+ refreshWithin: defaultRefresh,
+ sender: &http.Client{},
+ refreshCallbacks: callbacks,
+ }
+
+ if userAssignedID != nil {
+ spt.clientID = *userAssignedID
+ }
+
+ return spt, nil
+}
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+ message string
+ resp *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+ return tre.message
+}
+
+// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+ return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+ return tokenRefreshError{message: message, resp: resp}
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+ if spt.autoRefresh && spt.token.WillExpireIn(spt.refreshWithin) {
+ // take the write lock then check to see if the token was already refreshed
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ if spt.token.WillExpireIn(spt.refreshWithin) {
+ return spt.refreshInternal(spt.resource)
+ }
+ }
+ return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+ if spt.refreshCallbacks != nil {
+ for _, callback := range spt.refreshCallbacks {
+ err := callback(spt.token)
+ if err != nil {
+ return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+ }
+ }
+ }
+ return nil
+}
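Tying the MSI constructors above to `Refresh` (next), a sketch that only works on an Azure VM with a managed identity; the resource is a placeholder:

```
package main

import (
    "fmt"
    "log"

    "github.com/Azure/go-autorest/autorest/adal"
)

func main() {
    endpoint, err := adal.GetMSIVMEndpoint()
    if err != nil {
        log.Fatal(err)
    }
    // Uses the system-assigned identity; see the user-assigned variant above.
    spt, err := adal.NewServicePrincipalTokenFromMSI(endpoint, "https://management.core.windows.net/")
    if err != nil {
        log.Fatal(err)
    }
    if err := spt.Refresh(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(spt.Token().AccessToken)
}
```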
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) Refresh() error {
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ return spt.refreshInternal(spt.resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ return spt.refreshInternal(resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+ switch spt.secret.(type) {
+ case *ServicePrincipalUsernamePasswordSecret:
+ return OAuthGrantTypeUserPass
+ case *ServicePrincipalAuthorizationCodeSecret:
+ return OAuthGrantTypeAuthorizationCode
+ default:
+ return OAuthGrantTypeClientCredentials
+ }
+}
+
+func isIMDS(u url.URL) bool {
+ imds, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return false
+ }
+ return u.Host == imds.Host && u.Path == imds.Path
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
+ req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), nil)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+ }
+
+ if !isIMDS(spt.oauthConfig.TokenEndpoint) {
+ v := url.Values{}
+ v.Set("client_id", spt.clientID)
+ v.Set("resource", resource)
+
+ if spt.token.RefreshToken != "" {
+ v.Set("grant_type", OAuthGrantTypeRefreshToken)
+ v.Set("refresh_token", spt.token.RefreshToken)
+ } else {
+ v.Set("grant_type", spt.getGrantType())
+ err := spt.secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ req.Body = body
+ }
+
+ if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
+ req.Method = http.MethodGet
+ req.Header.Set(metadataHeader, "true")
+ }
+
+ var resp *http.Response
+ if isIMDS(spt.oauthConfig.TokenEndpoint) {
+ resp, err = retry(spt.sender, req)
+ } else {
+ resp, err = spt.sender.Do(req)
+ }
+ if err != nil {
+ return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+ }
+
+ defer resp.Body.Close()
+ rb, err := ioutil.ReadAll(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ if err != nil {
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode), resp)
+ }
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
+ }
+
+ if err != nil {
+ return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
+ }
+ if len(strings.Trim(string(rb), " ")) == 0 {
+ return fmt.Errorf("adal: Empty service principal token received during refresh")
+ }
+ var token Token
+ err = json.Unmarshal(rb, &token)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
+ }
+
+ spt.token = token
+
+ return spt.InvokeRefreshCallbacks(token)
+}
+
+func retry(sender Sender, req *http.Request) (resp *http.Response, err error) {
+ retries := []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+ // Extra retry status codes required
+ retries = append(retries, http.StatusNotFound,
+ // all remaining 5xx
+ http.StatusNotImplemented,
+ http.StatusHTTPVersionNotSupported,
+ http.StatusVariantAlsoNegotiates,
+ http.StatusInsufficientStorage,
+ http.StatusLoopDetected,
+ http.StatusNotExtended,
+ http.StatusNetworkAuthenticationRequired)
+
+ attempt := 0
+ maxAttempts := 5
+
+ for attempt < maxAttempts {
+ resp, err = sender.Do(req)
+ if err != nil {
+ return
+ }
+
+ if resp.StatusCode == http.StatusOK {
+ return
+ }
+ if containsInt(retries, resp.StatusCode) {
+ delayed := false
+ if resp.StatusCode == http.StatusTooManyRequests {
+ delayed = delay(resp, req.Cancel)
+ }
+ if !delayed {
+ time.Sleep(time.Second)
+ attempt++
+ }
+ } else {
+ return
+ }
+ }
+ return
+}
+
+func containsInt(ints []int, n int) bool {
+ for _, i := range ints {
+ if i == n {
+ return true
+ }
+ }
+ return false
+}
+
+func delay(resp *http.Response, cancel <-chan struct{}) bool {
+ if resp == nil {
+ return false
+ }
+ retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
+ if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+ select {
+ case <-time.After(time.Duration(retryAfter) * time.Second):
+ return true
+ case <-cancel:
+ return false
+ }
+ }
+ return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+ spt.autoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+ spt.refreshWithin = d
+ return
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+ spt.refreshLock.RLock()
+ defer spt.refreshLock.RUnlock()
+ return spt.token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+ spt.refreshLock.RLock()
+ defer spt.refreshLock.RUnlock()
+ return spt.token
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
new file mode 100644
index 0000000000..c51eac0a78
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -0,0 +1,257 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+ bearerChallengeHeader = "Www-Authenticate"
+ bearer = "Bearer"
+ tenantID = "tenantID"
+ apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
+ bingAPISdkHeader = "X-BingApis-SDK-Client"
+ golangBingAPISdkHeaderValue = "Go-SDK"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+ return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+ headers map[string]interface{}
+ queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+ return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+ return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the given headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+ return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+ }
+}
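A sketch of `APIKeyAuthorizer` above; `Prepare` and `WithBaseURL` belong to this package's preparer pipeline (see autorest.go below), and the header name and value are placeholders:

```
package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/Azure/go-autorest/autorest"
)

func main() {
    authorizer := autorest.NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
        "X-Example-Key": "EXAMPLE_VALUE", // placeholder header and value
    })
    req, err := autorest.Prepare(&http.Request{},
        autorest.WithBaseURL("https://example.com/"),
        authorizer.WithAuthorization())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(req.Header.Get("X-Example-Key"))
}
```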
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+ subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the provided subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+ return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription-key
+// header and the Bing API SDK header.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := make(map[string]interface{})
+ headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+ headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization
+type BearerAuthorizer struct {
+ tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+ return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ refresher, ok := ba.tokenProvider.(adal.Refresher)
+ if ok {
+ err := refresher.EnsureFresh()
+ if err != nil {
+ var resp *http.Response
+ if tokError, ok := err.(adal.TokenRefreshError); ok {
+ resp = tokError.Response()
+ }
+ return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
+ "Failed to refresh the Token for request to %s", r.URL)
+ }
+ }
+ return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
+ }
+ return r, err
+ })
+ }
+}
+
+// BearerAuthorizerCallbackFunc is the authentication callback signature.
+type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
+
+// BearerAuthorizerCallback implements bearer authorization via a callback.
+type BearerAuthorizerCallback struct {
+ sender Sender
+ callback BearerAuthorizerCallbackFunc
+}
+
+// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
+// is invoked when the HTTP request is submitted.
+func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+ if sender == nil {
+ sender = &http.Client{}
+ }
+ return &BearerAuthorizerCallback{sender: sender, callback: callback}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
+// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ // make a copy of the request and remove the body as it's not
+ // required; this also avoids having to copy the body.
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 0000000000..aafdf021fd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. 
Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. 
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 0000000000..a58e5ef3f1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,511 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + req *http.Request + resp *http.Response + ps pollingState +} + +// NewFuture returns a new Future object initialized with the specified request. 
+func NewFuture(req *http.Request) Future { + return Future{req: req} +} + +// Response returns the last HTTP response or nil if there isn't one. +func (f Future) Response() *http.Response { + return f.resp +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.ps.State == "" { + return "Unknown" + } + return f.ps.State +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + return f.ps.PollingMethod +} + +// Done queries the service to see if the operation has completed. +func (f *Future) Done(sender autorest.Sender) (bool, error) { + // exit early if this future has terminated + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + resp, err := sender.Do(f.req) + f.resp = resp + if err != nil { + return false, err + } + + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { + // check response body for error content + if resp.Body != nil { + type respErr struct { + ServiceError ServiceError `json:"error"` + } + re := respErr{} + + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, err + } + err = json.Unmarshal(b, &re) + if err != nil { + return false, err + } + return false, re.ServiceError + } + + // try to return something meaningful + return false, ServiceError{ + Code: fmt.Sprintf("%v", resp.StatusCode), + Message: resp.Status, + } + } + + err = updatePollingState(resp, &f.ps) + if err != nil { + return false, err + } + + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + + f.req, err = newPollingRequest(f.ps) + return false, err +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.resp == nil { + return 0, false + } + + retry := f.resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletion will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. 
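A sketch of driving a `Future` to completion with `WaitForCompletion` (defined next); it assumes a configured `autorest.Client` and a future for an operation started elsewhere:

```
package lro

import (
    "context"
    "log"

    "github.com/Azure/go-autorest/autorest"
    "github.com/Azure/go-autorest/autorest/azure"
)

// awaitOperation blocks until the operation behind fut reaches a terminal
// state, honoring the client's polling duration and retry settings.
func awaitOperation(ctx context.Context, client autorest.Client, fut azure.Future) error {
    if err := fut.WaitForCompletion(ctx, client); err != nil {
        return err
    }
    log.Printf("operation finished with status %q", fut.Status())
    return nil
}
```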
+func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { + ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) + defer cancel() + + done, err := f.Done(client) + for attempts := 0; !done; done, err = f.Done(client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled") + } + } + return err +} + +// if the operation failed the polling state will contain +// error information and implements the error interface +func (f *Future) errorInfo() error { + if !f.ps.hasSucceeded() { + return f.ps + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(&f.ps) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + err := json.Unmarshal(data, &f.ps) + if err != nil { + return err + } + f.req, err = newPollingRequest(f.ps) + return err +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +// For LROs that use the Location header the final URL value is used to retrieve the result. +func (f Future) PollingURL() string { + return f.ps.URI +} + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) 
{ + return resp, nil + } + + ps := pollingState{} + for err == nil { + err = updatePollingState(resp, &ps) + if err != nil { + break + } + if ps.hasTerminated() { + if !ps.hasSucceeded() { + err = ps + } + break + } + + r, err = newPollingRequest(ps) + if err != nil { + return resp, err + } + r = r.WithContext(resp.Request.Context()) + + delay = autorest.GetRetryAfter(resp, delay) + resp, err = autorest.SendWithSender(s, r, + autorest.AfterDelay(delay)) + } + + return resp, err + }) + } +} + +func getAsyncOperation(resp *http.Response) string { + return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +} + +func hasSucceeded(state string) bool { + return strings.EqualFold(state, operationSucceeded) +} + +func hasTerminated(state string) bool { + return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded) +} + +func hasFailed(state string) bool { + return strings.EqualFold(state, operationFailed) +} + +type provisioningTracker interface { + state() string + hasSucceeded() bool + hasTerminated() bool +} + +type operationResource struct { + // Note: + // The specification states services should return the "id" field. However some return it as + // "operationId". + ID string `json:"id"` + OperationID string `json:"operationId"` + Name string `json:"name"` + Status string `json:"status"` + Properties map[string]interface{} `json:"properties"` + OperationError ServiceError `json:"error"` + StartTime date.Time `json:"startTime"` + EndTime date.Time `json:"endTime"` + PercentComplete float64 `json:"percentComplete"` +} + +func (or operationResource) state() string { + return or.Status +} + +func (or operationResource) hasSucceeded() bool { + return hasSucceeded(or.state()) +} + +func (or operationResource) hasTerminated() bool { + return hasTerminated(or.state()) +} + +type provisioningProperties struct { + ProvisioningState string `json:"provisioningState"` +} + +type provisioningStatus struct { + Properties provisioningProperties `json:"properties,omitempty"` + ProvisioningError ServiceError `json:"error,omitempty"` +} + +func (ps provisioningStatus) state() string { + return ps.Properties.ProvisioningState +} + +func (ps provisioningStatus) hasSucceeded() bool { + return hasSucceeded(ps.state()) +} + +func (ps provisioningStatus) hasTerminated() bool { + return hasTerminated(ps.state()) +} + +func (ps provisioningStatus) hasProvisioningError() bool { + // code and message are required fields so only check them + return len(ps.ProvisioningError.Code) > 0 || + len(ps.ProvisioningError.Message) > 0 +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingUnknown indicates an unknown polling method and is the default value. 
+	PollingUnknown PollingMethodType = ""
+)
+
+type pollingState struct {
+	PollingMethod PollingMethodType `json:"pollingMethod"`
+	URI           string            `json:"uri"`
+	State         string            `json:"state"`
+	ServiceError  *ServiceError     `json:"error,omitempty"`
+}
+
+func (ps pollingState) hasSucceeded() bool {
+	return hasSucceeded(ps.State)
+}
+
+func (ps pollingState) hasTerminated() bool {
+	return hasTerminated(ps.State)
+}
+
+func (ps pollingState) hasFailed() bool {
+	return hasFailed(ps.State)
+}
+
+func (ps pollingState) Error() string {
+	s := fmt.Sprintf("Long running operation terminated with status '%s'", ps.State)
+	if ps.ServiceError != nil {
+		s = fmt.Sprintf("%s: %+v", s, *ps.ServiceError)
+	}
+	return s
+}
+
+// updatePollingState maps the operation status -- retrieved from either a provisioningState
+// field, the status field of an OperationResource, or inferred from the HTTP status code --
+// into well-known states. Since the process begins with the initial request, the state
+// always comes from either the provisioningState returned or is inferred from the HTTP
+// status code. Subsequent requests will read an Azure OperationResource object if the
+// service initially returned the Azure-AsyncOperation header. The PollingMethod field notes
+// the expected response format.
+func updatePollingState(resp *http.Response, ps *pollingState) error {
+	// Determine the response shape
+	// -- The first response will always be a provisioningStatus response; only subsequent
+	//    polling responses, depending on the header returned, may take a different shape.
+	var pt provisioningTracker
+	if ps.PollingMethod == PollingAsyncOperation {
+		pt = &operationResource{}
+	} else {
+		pt = &provisioningStatus{}
+	}
+
+	// If this is the first request (that is, the polling response shape is unknown), determine how
+	// to poll and what to expect
+	if ps.PollingMethod == PollingUnknown {
+		req := resp.Request
+		if req == nil {
+			return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
+		}
+
+		// Prefer the Azure-AsyncOperation header
+		ps.URI = getAsyncOperation(resp)
+		if ps.URI != "" {
+			ps.PollingMethod = PollingAsyncOperation
+		} else {
+			ps.PollingMethod = PollingLocation
+		}
+
+		// Else, use the Location header
+		if ps.URI == "" {
+			ps.URI = autorest.GetLocation(resp)
+		}
+
+		// Lastly, for requests against an existing resource, use the last request URI
+		if ps.URI == "" {
+			m := strings.ToUpper(req.Method)
+			if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
+				ps.URI = req.URL.String()
+			}
+		}
+	}
+
+	// Read and interpret the response (saving the Body in case no polling is necessary)
+	b := &bytes.Buffer{}
+	err := autorest.Respond(resp,
+		autorest.ByCopying(b),
+		autorest.ByUnmarshallingJSON(pt),
+		autorest.ByClosing())
+	resp.Body = ioutil.NopCloser(b)
+	if err != nil {
+		return err
+	}
+
+	// Interpret the results
+	// -- Terminal states apply regardless
+	// -- Unknown states are per-service in-progress states
+	// -- Otherwise, infer state from HTTP status code
+	if pt.hasTerminated() {
+		ps.State = pt.state()
+	} else if pt.state() != "" {
+		ps.State = operationInProgress
+	} else {
+		switch resp.StatusCode {
+		case http.StatusAccepted:
+			ps.State = operationInProgress
+
+		case http.StatusNoContent, http.StatusCreated, http.StatusOK:
+			ps.State = operationSucceeded
+
+		default:
+			ps.State = operationFailed
+		}
+	}
+
+	if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" {
+		return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
+	}
+
+	// For a failed operation, check for error code and message in
+	// -- Operation resource
+	// -- Response
+	// -- Otherwise, Unknown
+	if ps.hasFailed() {
+		if or, ok := pt.(*operationResource); ok {
+			ps.ServiceError = &or.OperationError
+		} else if p, ok := pt.(*provisioningStatus); ok && p.hasProvisioningError() {
+			ps.ServiceError = &p.ProvisioningError
+		} else {
+			ps.ServiceError = &ServiceError{
+				Code:    "Unknown",
+				Message: "None",
+			}
+		}
+	}
+	return nil
+}
+
+func newPollingRequest(ps pollingState) (*http.Request, error) {
+	reqPoll, err := autorest.Prepare(&http.Request{},
+		autorest.AsGet(),
+		autorest.WithBaseURL(ps.URI))
+	if err != nil {
+		return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI)
+	}
+
+	return reqPoll, nil
+}
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 0000000000..18d029526f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,301 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
+type ServiceError struct {
+	Code       string                   `json:"code"`
+	Message    string                   `json:"message"`
+	Target     *string                  `json:"target"`
+	Details    []map[string]interface{} `json:"details"`
+	InnerError map[string]interface{}   `json:"innererror"`
+}
+
+func (se ServiceError) Error() string {
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
+	}
+
+	if se.Details != nil {
+		d, err := json.Marshal(se.Details)
+		if err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%v", string(d))
+		}
+	}
+
+	if se.InnerError != nil {
+		d, err := json.Marshal(se.InnerError)
+		if err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%v", string(d))
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// per the OData v4 spec the details field must be an array of JSON objects.
+	// unfortunately not all services adhere to the spec and just return a single
+	// object instead of an array with one object. so we have to perform some
+	// shenanigans to accommodate both cases.
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceError1 struct {
+		Code       string                   `json:"code"`
+		Message    string                   `json:"message"`
+		Target     *string                  `json:"target"`
+		Details    []map[string]interface{} `json:"details"`
+		InnerError map[string]interface{}   `json:"innererror"`
+	}
+
+	type serviceError2 struct {
+		Code       string                 `json:"code"`
+		Message    string                 `json:"message"`
+		Target     *string                `json:"target"`
+		Details    map[string]interface{} `json:"details"`
+		InnerError map[string]interface{} `json:"innererror"`
+	}
+
+	se1 := serviceError1{}
+	err := json.Unmarshal(b, &se1)
+	if err == nil {
+		se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError)
+		return nil
+	}
+
+	se2 := serviceError2{}
+	err = json.Unmarshal(b, &se2)
+	if err == nil {
+		se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError)
+		se.Details = append(se.Details, se2.Details)
+		return nil
+	}
+	return err
+}
+
+func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) {
+	se.Code = code
+	se.Message = message
+	se.Target = target
+	se.Details = details
+	se.InnerError = inner
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error"`
+
+	// The request id (from the x-ms-request-id header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
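+// For example, an ID of the (illustrative) form
+//
+//	/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<name>
+//
+// yields SubscriptionID=<sub>, ResourceGroup=<rg>, Provider="Microsoft.Compute",
+// ResourceType="virtualMachines" and ResourceName=<name>.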
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. 
+func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 0000000000..7e41f7fd99 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,191 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +// EnvironmentFilepathName captures the name of the environment variable containing the path to the file +// to be used while populating the Azure Environment. +const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
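+//
+// A minimal lookup sketch (illustrative):
+//
+//	env, err := EnvironmentFromName("AzurePublicCloud")
+//	if err == nil {
+//		_ = env.ResourceManagerEndpoint // "https://management.azure.com/"
+//	}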
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.azure.com/", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + 
ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.usgovcloudapi.net/", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.chinacloudapi.cn/", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.microsoftazure.de/", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. 
+// This function is particularly useful in the Hybrid Cloud model, where one must define their own
+// endpoints.
+func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
+	fileContents, err := ioutil.ReadFile(location)
+	if err != nil {
+		return
+	}
+
+	err = json.Unmarshal(fileContents, &unmarshaled)
+
+	return
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
new file mode 100644
index 0000000000..507f9e95cf
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
@@ -0,0 +1,245 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+type audience []string
+
+type authentication struct {
+	LoginEndpoint string   `json:"loginEndpoint"`
+	Audiences     audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+	GalleryEndpoint string         `json:"galleryEndpoint"`
+	GraphEndpoint   string         `json:"graphEndpoint"`
+	PortalEndpoint  string         `json:"portalEndpoint"`
+	Authentication  authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents a property name that clients can override
+type EnvironmentProperty string
+
+const (
+	// EnvironmentName ...
+	EnvironmentName EnvironmentProperty = "name"
+	// EnvironmentManagementPortalURL ...
+	EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+	// EnvironmentPublishSettingsURL ...
+	EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+	// EnvironmentServiceManagementEndpoint ...
+	EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+	// EnvironmentResourceManagerEndpoint ...
+	EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+	// EnvironmentActiveDirectoryEndpoint ...
+	EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+	// EnvironmentGalleryEndpoint ...
+	EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+	// EnvironmentKeyVaultEndpoint ...
+	EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+	// EnvironmentGraphEndpoint ...
+	EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+	// EnvironmentServiceBusEndpoint ...
+	EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+	// EnvironmentBatchManagementEndpoint ...
+	EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+	// EnvironmentStorageEndpointSuffix ...
+	EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+	// EnvironmentSQLDatabaseDNSSuffix ...
+	EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+	// EnvironmentTrafficManagerDNSSuffix ...
+ EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case 
EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 0000000000..65ad0afc82 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,200 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
			rr := autorest.NewRetriableRequest(r)
			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
				err = rr.Prepare()
				if err != nil {
					return resp, err
				}

				resp, err = autorest.SendWithSender(s, rr.Request(),
					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
				)
				if err != nil {
					return resp, err
				}

				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
					return resp, err
				}
				var re RequestError
				err = autorest.Respond(
					resp,
					autorest.ByUnmarshallingJSON(&re),
				)
				if err != nil {
					return resp, err
				}
				err = re

				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
					regErr := register(client, r, re)
					if regErr != nil {
						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
					}
				}
			}
			return resp, fmt.Errorf("failed request: %s", err)
		})
	}
}

func getProvider(re RequestError) (string, error) {
	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
		return re.ServiceError.Details[0]["target"].(string), nil
	}
	return "", errors.New("provider was not found in the response")
}

func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
	subID := getSubscription(originalReq.URL.Path)
	if subID == "" {
		return errors.New("missing parameter subscriptionID to register resource provider")
	}
	providerName, err := getProvider(re)
	if err != nil {
		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
	}
	newURL := url.URL{
		Scheme: originalReq.URL.Scheme,
		Host:   originalReq.URL.Host,
	}

	// taken from the resources SDK
	// with almost identical code, these sections are easier to maintain
	// It is also not a good idea to import the SDK here
	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", providerName),
		"subscriptionId":            autorest.Encode("path", subID),
	}

	const APIVersion = "2016-09-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(newURL.String()),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	)

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		return err
	}
	req = req.WithContext(originalReq.Context())

	resp, err := autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
	)
	if err != nil {
		return err
	}

	type Provider struct {
		RegistrationState *string `json:"registrationState,omitempty"`
	}
	var provider Provider

	err = autorest.Respond(
		resp,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&provider),
		autorest.ByClosing(),
	)
	if err != nil {
		return err
	}

	// poll for registered provisioning state
	now := time.Now()
	for err == nil && time.Since(now) < client.PollingDuration {
		// taken from the resources SDK
		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
		preparer := autorest.CreatePreparer(
			autorest.AsGet(),
			autorest.WithBaseURL(newURL.String()),
			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
			autorest.WithQueryParameters(queryParameters),
		)
		req, err = preparer.Prepare(&http.Request{})
		if err != nil {
			return err
		}
		req = req.WithContext(originalReq.Context())

		resp, err := autorest.SendWithSender(client, req,
			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
		)
		if err != nil {
			return err
		}

		err = autorest.Respond(
			resp,
			WithErrorUnlessStatusCode(http.StatusOK),
			autorest.ByUnmarshallingJSON(&provider),
			autorest.ByClosing(),
		)
		if err != nil {
			return err
		}

		if provider.RegistrationState != nil &&
			*provider.RegistrationState == "Registered" {
			break
		}

		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
		if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
			return originalReq.Context().Err()
		}
	}
	if !(time.Since(now) < client.PollingDuration) {
		return errors.New("polling for resource provider registration has exceeded the polling duration")
	}
	return err
}

func getSubscription(path string) string {
	parts := strings.Split(path, "/")
	for i, v := range parts {
		if v == "subscriptions" && (i+1) < len(parts) {
			return parts[i+1]
		}
	}
	return ""
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 0000000000..4e92dcad07
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,264 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/http/cookiejar"
+	"runtime"
+	"time"
+)
+
+const (
+	// DefaultPollingDelay is a reasonable delay between polling requests.
+	DefaultPollingDelay = 60 * time.Second
+
+	// DefaultPollingDuration is a reasonable total polling duration.
+	DefaultPollingDuration = 15 * time.Minute
+
+	// DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+	DefaultRetryAttempts = 3
+
+	// DefaultRetryDuration is the duration to wait between retries.
+	DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+	// defaultUserAgent builds a string containing the Go version, system architecture and OS,
+	// and the go-autorest version.
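+	//
+	// A typical value looks like the following (versions are illustrative):
+	//
+	//	Go/go1.11.4 (amd64-linux) go-autorest/v11.1.0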
+	defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+		runtime.Version(),
+		runtime.GOARCH,
+		runtime.GOOS,
+		Version(),
+	)
+
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+			defer resp.Body.Close()
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			if err := resp.Write(&b); err != nil {
+				return fmt.Errorf("Failed to write response: %v", err)
+			}
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+	Authorizer        Authorizer
+	Sender            Sender
+	RequestInspector  PrepareDecorator
+	ResponseInspector RespondDecorator
+
+	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header.
+	PollingDelay time.Duration
+
+	// PollingDuration sets the maximum polling time after which an error is returned.
+	PollingDuration time.Duration
+
+	// RetryAttempts sets the default number of retry attempts for the client.
+	RetryAttempts int
+
+	// RetryDuration sets the delay duration for retries.
+	RetryDuration time.Duration
+
+	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+	// through the Do method.
+	UserAgent string
+
+	Jar http.CookieJar
+
+	// Set to true to skip attempted registration of resource providers (false by default).
+	SkipResourceProviderRegistration bool
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       defaultUserAgent,
+	}
+	c.Sender = c.sender()
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent.
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+
+	resp, err := SendWithSender(c.sender(), r)
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender() Sender {
+	if c.Sender == nil {
+		j, _ := cookiejar.New(nil)
+		return &http.Client{Jar: j}
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+	if c.ResponseInspector == nil {
+		return ByIgnoring()
+	}
+	return c.ResponseInspector
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 0000000000..c457106568
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. And both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	fullDate     = "2006-01-02"
+	fullDateJSON = `"2006-01-02"`
+	dateFormat   = "%04d-%02d-%02d"
+	jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+	return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+	d, err := time.Parse(format, date)
+	return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+	return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+	return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+	d.Time, err = time.Parse(fullDateJSON, string(data))
+	return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 0000000000..b453fad049 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). 
+func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 0000000000..48fb39ba9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
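Because Date and Time embed time.Time and implement the marshaling interfaces, they drop directly into encoding/json structs. A minimal sketch, not part of the vendored code (the record type and values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

// record is a made-up struct for demonstration.
type record struct {
	Day  date.Date `json:"day"`  // RFC3339 full-date
	When date.Time `json:"when"` // RFC3339 date-time
}

func main() {
	var r record
	in := []byte(`{"day":"2019-01-10","when":"2019-01-10T15:04:05Z"}`)
	if err := json.Unmarshal(in, &r); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"day":"2019-01-10","when":"2019-01-10T15:04:05Z"}
}
```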
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 0000000000..7073959b2a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) 
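A quick sketch of the UnixTime helpers defined above (illustrative only; the timestamp is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// 1547078400 seconds after the epoch is 2019-01-10T00:00:00Z.
	u := date.NewUnixTimeFromSeconds(1547078400)

	fmt.Println(time.Time(u).UTC()) // 2019-01-10 00:00:00 +0000 UTC
	fmt.Println(u.Duration())       // elapsed time since date.UnixEpoch()
}
```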
+func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 0000000000..12addf0ebb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 0000000000..f724f33327 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,98 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "fmt"
+    "net/http"
+)
+
+const (
+    // UndefinedStatusCode is used when HTTP status code is not available for an error.
+    UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+    Original error
+
+    // PackageType is the package type of the object emitting the error. For types, the value
+    // matches that produced by the '%T' format specifier of the fmt package. For other elements,
+    // such as functions, it is just the package name (e.g., "autorest").
+    PackageType string
+
+    // Method is the name of the method raising the error.
+    Method string
+
+    // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+    StatusCode interface{}
+
+    // Message is the error message.
+    Message string
+
+    // ServiceError is the response body of the failed API call, in bytes.
+    ServiceError []byte
+
+    // Response is the response object that was returned during failure if applicable.
+    Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+    return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+    return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+    if v, ok := original.(DetailedError); ok {
+        return v
+    }
+
+    statusCode := UndefinedStatusCode
+    if resp != nil {
+        statusCode = resp.StatusCode
+    }
+
+    return DetailedError{
+        Original:    original,
+        PackageType: packageType,
+        Method:      method,
+        StatusCode:  statusCode,
+        Message:     fmt.Sprintf(message, args...),
+        Response:    resp,
+    }
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and the original error (if any)).
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 0000000000..6d67bd7337 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,480 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. 
+// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. 
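Since the decorators above are small and composable, request construction reads as a pipeline. A hedged sketch, not vendored code (the URL, token, and header values are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Decorators are applied in the order given; later decorators see the
	// request after earlier ones have modified it.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.invalid"), // placeholder
		autorest.AsJSON(),
		autorest.WithBearerAuthorization("<access-token>"),
		autorest.WithHeader("X-Request-ID", "42"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Authorization"))
}
```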
+func WithMethod(method string) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r.Method = method
+            return p.Prepare(r)
+        })
+    }
+}
+
+// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
+func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
+
+// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
+func AsGet() PrepareDecorator { return WithMethod("GET") }
+
+// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
+func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+
+// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                var u *url.URL
+                if u, err = url.Parse(baseURL); err != nil {
+                    return r, err
+                }
+                if u.Scheme == "" {
+                    err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+                }
+                if err == nil {
+                    r.URL = u
+                }
+            }
+            return r, err
+        })
+    }
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+    parameters := ensureValueStrings(urlParameters)
+    for key, value := range parameters {
+        baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+    }
+    return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                s := v.Encode()
+
+                if r.Header == nil {
+                    r.Header = make(http.Header)
+                }
+                r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+                r.ContentLength = int64(len(s))
+                r.Body = ioutil.NopCloser(strings.NewReader(s))
+            }
+            return r, err
+        })
+    }
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters into
+// the http.Request body as multipart/form-data.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. 
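For the body-writing decorators, a small sketch (illustrative; the endpoint and field are made up) showing WithFormData setting the body, Content-Type, and Content-Length in one step:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	form := url.Values{}
	form.Set("name", "kube-state-metrics")

	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.WithBaseURL("https://example.invalid/submit"), // placeholder
		autorest.WithFormData(form))
	if err != nil {
		panic(err)
	}

	body, _ := ioutil.ReadAll(req.Body)
	fmt.Println(req.Header.Get("Content-Type")) // application/x-www-form-urlencoded
	fmt.Println(string(body))                   // name=kube-state-metrics
}
```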
+func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
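The path and query decorators substitute brace-enclosed keys and append query strings. A minimal sketch under made-up names:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.invalid"), // placeholder
		autorest.WithPathParameters("/groups/{group}/items",
			map[string]interface{}{"group": "demo"}),
		autorest.WithQueryParameters(
			map[string]interface{}{"api-version": "2019-01-01"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL) // https://example.invalid/groups/demo/items?api-version=2019-01-01
}
```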
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + + v := r.URL.Query() + for key, value := range parameters { + d, err := url.QueryUnescape(value) + if err != nil { + return r, err + } + v.Add(key, d) + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 0000000000..a908a0adb7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,250 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). 
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. 
+func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. 
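A typical Respond chain checks the status code, decodes the JSON body, and closes it, in that order. Sketch only; the widget type and URL are made up, and the request will of course fail against a placeholder host:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// widget is a made-up response shape.
type widget struct {
	Name string `json:"name"`
}

func main() {
	resp, err := http.Get("https://example.invalid/widget") // placeholder URL
	if err != nil {
		panic(err)
	}

	var w widget
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessOK(),     // runs first
		autorest.ByUnmarshallingJSON(&w), // then decodes the body
		autorest.ByClosing())             // closes last, preventing leaks
	fmt.Println(w.Name, err)
}
```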
+func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 0000000000..fa11dbed79 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 0000000000..7143cc61b5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. 
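RetriableRequest is what the retry SendDecorators lean on: Prepare rewinds or re-copies the body so a request can be sent more than once. A rough sketch of that pattern (illustrative; real callers normally use the DoRetry* decorators defined later in this diff):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest("POST", "https://example.invalid", // placeholder
		strings.NewReader("payload"))

	rr := autorest.NewRetriableRequest(req)
	for attempt := 0; attempt < 3; attempt++ {
		// Prepare preserves/rewinds the body so each attempt re-sends the
		// same bytes even though the transport closes the body.
		if err := rr.Prepare(); err != nil {
			panic(err)
		}
		resp, err := http.DefaultClient.Do(rr.Request())
		if err == nil {
			fmt.Println(resp.Status)
			return
		}
	}
	fmt.Println("all attempts failed")
}
```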
+func (rr *RetriableRequest) Prepare() (err error) {
+    // preserve the request body; this is to support retry logic as
+    // the underlying transport will always close the request body
+    if rr.req.Body != nil {
+        if rr.br != nil {
+            _, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+            rr.req.Body = ioutil.NopCloser(rr.br)
+        }
+        if err != nil {
+            return err
+        }
+        if rr.br == nil {
+            // fall back to making a copy (only do this once)
+            err = rr.prepareFromByteReader()
+        }
+    }
+    return err
+}
+
+func removeRequestBody(req *http.Request) {
+    req.Body = nil
+    req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 0000000000..ae15c6bf96
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+    "bytes"
+    "io"
+    "io/ioutil"
+    "net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+    req *http.Request
+    rc  io.ReadCloser
+    br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+    // preserve the request body; this is to support retry logic as
+    // the underlying transport will always close the request body
+    if rr.req.Body != nil {
+        if rr.rc != nil {
+            rr.req.Body = rr.rc
+        } else if rr.br != nil {
+            _, err = rr.br.Seek(0, io.SeekStart)
+            rr.req.Body = ioutil.NopCloser(rr.br)
+        }
+        if err != nil {
+            return err
+        }
+        if rr.req.GetBody != nil {
+            // this will allow us to preserve the body without having to
+            // make a copy. note we need to do this on each iteration
+            rr.rc, err = rr.req.GetBody()
+            if err != nil {
+                return err
+            }
+        } else if rr.br == nil {
+            // fall back to making a copy (only do this once)
+            err = rr.prepareFromByteReader()
+        }
+    }
+    return err
+}
+
+func removeRequestBody(req *http.Request) {
+    req.Body = nil
+    req.GetBody = nil
+    req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 0000000000..b4f762325f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,321 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "fmt"
+    "log"
+    "math"
+    "net/http"
+    "strconv"
+    "time"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+    Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+    return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+    return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+    for _, decorate := range decorators {
+        s = decorate(s)
+    }
+    return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+    return SendWithSender(&http.Client{}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the Sender before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+    return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by canceling the context on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+    return func(s Sender) Sender {
+        return SenderFunc(func(r *http.Request) (*http.Response, error) {
+            if !DelayForBackoff(d, 0, r.Context().Done()) {
+                return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+            }
+            return s.Do(r)
+        })
+    }
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
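Because a SendDecorator is just a function wrapping a Sender, adding cross-cutting behavior takes one closure. An illustrative sketch (the counter is contrived; the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withRequestCount is a contrived SendDecorator that counts requests.
func withRequestCount(count *int) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			*count++ // pre-decorator: act before delegating to the wrapped Sender
			return s.Do(r)
		})
	}
}

func main() {
	n := 0
	req, _ := http.NewRequest("GET", "https://example.invalid", nil) // placeholder
	resp, err := autorest.SendWithSender(&http.Client{}, req, withRequestCount(&n))
	fmt.Println(n, resp, err)
}
```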
+func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + // don't count a 429 against the number of attempts + // so that we continue to retry until it succeeds + if resp == nil || resp.StatusCode != http.StatusTooManyRequests { + attempt++ + } + } + return resp, err + }) + } +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. 
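Putting the retry decorators to work is a one-liner with SendWithSender. Sketch only; the attempt count, backoff, status codes, and URL are arbitrary placeholder choices:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.invalid", nil) // placeholder

	// Retry up to 3 times on the listed transient 5xx responses, backing
	// off exponentially from a 2s base delay.
	resp, err := autorest.SendWithSender(&http.Client{}, req,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable))
	fmt.Println(resp, err)
}
```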
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 0000000000..afb3e4e161 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,218 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. 
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. It expects the +// parameter passed to be a slice or array of a type whose underlying type +// is string. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, v.Index(i).String()) + } + return stringSlice, nil +} + +// String method converts interface v to string. If the interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified.
+func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. +func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 0000000000..4ad7754ada --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,20 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Version returns the semantic version (see http://semver.org). 
+func Version() string { + return "v10.5.0" +} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c8073..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index facfc91c65..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea7..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index a78a3df651..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,185 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. 
- -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
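As a concrete illustration of combining the convenience sets with individual flags, here is a minimal sketch (the input URL, the chosen flag combination, and the expected output are illustrative additions, not part of the original README):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Start from a convenience set, remove one flag with AND NOT (&^),
	// and add another with OR (|).
	flags := (purell.FlagsUsuallySafeGreedy &^ purell.FlagRemoveTrailingSlash) | purell.FlagSortQuery

	normalized, err := purell.NormalizeURLString("HTTP://Example.com/a/./b/../c/?z=3&a=1", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized)
	// Expected: http://example.com/a/c/?a=1&z=3
}
```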
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index b79da64b32..0000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,375 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/secure/precis" - "golang.org/x/text/unicode/norm" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - if parsed, e := url.Parse(u); e != nil { - return "", e - } else { - options := make([]precis.Option, 1, 3) - options[0] = precis.IgnoreCase - if f&FlagLowercaseHost == FlagLowercaseHost { - options = append(options, precis.FoldCase()) - } - options = append(options, precis.Norm(norm.NFC)) - profile := precis.NewFreeform(options...) - if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil { - return "", e - } - return NormalizeURL(parsed, f), nil - } - panic("Unreachable code.") -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." { - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." 
{ - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k, _ := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index 478630e505..0000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.4 - - tip - -install: - - go build . 
- -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index bebe305e0d..0000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b84624594..0000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. 
-// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. 
-// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index 2a7cfd2bf6..c836416192 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2012-2013 Dave Collins +ISC License + +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 565bf5899f..8a4a6589a2 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 457e41235e..1fe3cf3d5d 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above @@ -13,9 +13,10 @@ // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. 
The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc15b..1be8ce9457 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index ee1ab07b3f..2e3d22f312 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -64,9 +64,18 @@ type ConfigState struct { // inside these interface methods. As a result, this option relies on // access to the unsafe package, so it will not have any effect when // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. + // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c40609..aacaac6f1e 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. 
This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e27e..df1d582a72 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. - if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e24..c49875bacb 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f542e..32c0e33882 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 0000000000..80bed650ec --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 0000000000..bde823d8ab --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 0000000000..df83a9c2f0 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 0000000000..fd62e94905 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,96 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property. + +The old example for parsing a token looked like this: + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +and is now directly mapped to: + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type: + +```go + type MyCustomClaims struct { + User string + *jwt.StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable; a minimal sketch of its shape follows below.
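For reference, here is a minimal sketch of what that interface and a custom implementation can look like (the interface shape is assumed from the `request` subpackage described above; the cookie-based extractor and its name are purely illustrative, not part of the migration guide):

```go
package main

import (
	"errors"
	"net/http"
)

// Extractor is assumed to match the request subpackage's contract:
// anything that can pull a raw token string out of an *http.Request.
type Extractor interface {
	ExtractToken(req *http.Request) (string, error)
}

// cookieExtractor is a hypothetical custom Extractor that reads the
// token from a named cookie instead of a header or request argument.
type cookieExtractor string

// ExtractToken returns the cookie's value, or an error if the cookie
// is missing or empty.
func (c cookieExtractor) ExtractToken(req *http.Request) (string, error) {
	cookie, err := req.Cookie(string(c))
	if err != nil || cookie.Value == "" {
		return "", errors.New("no token present in request")
	}
	return cookie.Value, nil
}
```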
+ +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument. +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header. + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM-encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(token *jwt.Token) (interface{}, error) { + // Don't forget to validate that the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up the key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack the key from PEM-encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 0000000000..f48365fafb --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,85 @@ +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) + +**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/).
As this library doesn't force users to validate that the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies that the `alg` is what you expect. + + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and which key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to depend on the version-pinned import path instead: `gopkg.in/dgrijalva/jwt-go.v2`.
It will do the right thing with respect to semantic versioning. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing, as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation, and a minimal end-to-end sketch below.
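+
+Below is a minimal, self-contained sketch of signing and then parsing a token with HS256, using only the APIs described above; treat it as an illustration rather than canonical usage:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+func main() {
+	key := []byte("my-hmac-secret") // any []byte works as an HMAC secret
+
+	// Build and sign a token carrying a couple of claims.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"user": "alice",
+		"exp":  time.Now().Add(time.Hour).Unix(),
+	})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse and validate it, verifying the alg as recommended above.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return key, nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	claims := parsed.Claims.(jwt.MapClaims)
+	fmt.Println("token for user", claims["user"], "valid:", parsed.Valid)
+}
+```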
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 0000000000..b605b45093 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,105 @@ +## `jwt-go` Version History + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. 
+ +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 0000000000..f0228f02e0 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 0000000000..a86dc1a3b3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 0000000000..2f59a22236 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,147 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for ES256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s
:= big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 0000000000..d19624b726 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKIX public key (or the public key of a PEM encoded certificate) +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git
a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 0000000000..662df19d4e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,62 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + if e.Errors > 0 { + return false + } + return true +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 0000000000..c229919254 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 0000000000..291213c460 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". 
+// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 0000000000..f04d189d06 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 0000000000..7bf1c4ea08 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short-circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If Claims.Valid returned an error, check if it is a validation error. + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 0000000000..0ae0b1984e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, key must be an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 0000000000..10ee9db8a4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err :=
rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 0000000000..213a90dbbf --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKIX public key (or the public key of a PEM encoded certificate) +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 0000000000..ed1f212b21 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for a signing method.
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 0000000000..d637e0867c --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore deleted file mode 100644 index cece7be664..0000000000 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ /dev/null @@ -1,70 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index 070bca7cdc..0000000000 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,163 +0,0 @@ -Change history of go-restful -= -2016-02-14 -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 -- add configurable logging - -2015-03-18 -- if not specified, the Operation is derived from the Route function - -2015-03-17 -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 -- (api add) ApiVersion(.) 
for documentation in Swagger UI - -2014-11-10 -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 -- (api add) expose constructor of Request for testing. - -2014-06-27 -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - - (api change) Write* methods in Response now return the error or nil. - - added example of serving HTML from a Go template. - - fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - - (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - - (api add) Response knows what HTTP status has been written - - (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - - (api change) Router interface simplified - - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - - fixed some reported issues (see github) - - (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. - - (api add) the swagger package has be extended to have a UI per container. - - if panic is detected then a small stack trace is printed (thanks to runner-mei) - - (api add) WriteErrorString to Response - -Important API changes: - - - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. - - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. 
- - -2013-07-06 - - - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - - - (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - - - (api change) removed Dispatcher interface, hide PathExpression - - changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - - - (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - - - (api add) Added support for request/response filter functions - -2013-05-18 - - - - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - - (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - - - See https://github.com/emicklei/go-restful/commits - -2012-11-14 - - - Initial commit - - diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE deleted file mode 100644 index ece7ec61ef..0000000000 --- a/vendor/github.com/emicklei/go-restful/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md deleted file mode 100644 index cfe6d0a916..0000000000 --- a/vendor/github.com/emicklei/go-restful/README.md +++ /dev/null @@ -1,74 +0,0 @@ -go-restful -========== - -package for building REST-style Web Services using Google Go - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. -- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. 
-- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id}) support -- Configurable router: - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default) - - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter) -- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI (see swagger package) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable encoding using EntityReaderWriter registration -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -### Resources - -- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful) -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest) - -(c) 2012 - 2015, http://ernestmicklei.com. MIT License - -Type ```git shortlog -s``` for a full list of contributors.
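[Editor's note] As a complement to the router features listed in the README above, a minimal sketch of a CurlyRouter route that combines a regular-expression parameter with a tail wildcard; the `/files` service and the inline handler are illustrative, not from the deleted sources:

```go
package main

import (
	"io"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Regular-expression and wildcard path parameters require the CurlyRouter.
	restful.DefaultContainer.Router(restful.CurlyRouter{})

	ws := new(restful.WebService)
	ws.Path("/files").Produces("text/plain")
	// {owner:[a-z]+} restricts the value; {subpath:*} matches the remaining tail.
	ws.Route(ws.GET("/{owner:[a-z]+}/{subpath:*}").To(
		func(req *restful.Request, resp *restful.Response) {
			io.WriteString(resp, req.PathParameter("owner")+" -> "+req.PathParameter("subpath"))
		}))
	restful.Add(ws) // registers on the DefaultContainer (http.DefaultServeMux)
	http.ListenAndServe(":8080", nil)
}
```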
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd186892..0000000000 --- a/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh deleted file mode 100644 index 47ffbe4ac9..0000000000 --- a/vendor/github.com/emicklei/go-restful/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go deleted file mode 100644 index 220b37712f..0000000000 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ /dev/null @@ -1,123 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "compress/gzip" - "compress/zlib" - "errors" - "io" - "net" - "net/http" - "strings" -) - -// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. -var EnableContentEncoding = false - -// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) -type CompressingResponseWriter struct { - writer http.ResponseWriter - compressor io.WriteCloser - encoding string -} - -// Header is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) Header() http.Header { - return c.writer.Header() -} - -// WriteHeader is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) WriteHeader(status int) { - c.writer.WriteHeader(status) -} - -// Write is part of http.ResponseWriter interface -// It is passed through the compressor -func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { - if c.isCompressorClosed() { - return -1, errors.New("Compressing error: tried to write data using closed compressor") - } - return c.compressor.Write(bytes) -} - -// CloseNotify is part of http.CloseNotifier interface -func (c *CompressingResponseWriter) CloseNotify() <-chan bool { - return c.writer.(http.CloseNotifier).CloseNotify() -} - -// Close the underlying compressor -func (c *CompressingResponseWriter) Close() error { - if c.isCompressorClosed() { - return errors.New("Compressing error: tried to close already closed compressor") - } - - c.compressor.Close() - if ENCODING_GZIP == c.encoding { - currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) - } - if ENCODING_DEFLATE == c.encoding { - currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) - } - // gc hint needed? 
- c.compressor = nil - return nil -} - -func (c *CompressingResponseWriter) isCompressorClosed() bool { - return nil == c.compressor -} - -// Hijack implements the Hijacker interface. -// This is especially useful when combining Container.EnableContentEncoding -// with websockets (for instance gorilla/websocket) -func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := c.writer.(http.Hijacker) - if !ok { - return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") - } - return hijacker.Hijack() -} - -// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { - header := httpRequest.Header.Get(HEADER_AcceptEncoding) - gi := strings.Index(header, ENCODING_GZIP) - zi := strings.Index(header, ENCODING_DEFLATE) - // use in order of appearance - if gi == -1 { - return zi != -1, ENCODING_DEFLATE - } else if zi == -1 { - return gi != -1, ENCODING_GZIP - } else { - if gi < zi { - return true, ENCODING_GZIP - } - return true, ENCODING_DEFLATE - } -} - -// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate} -func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { - httpWriter.Header().Set(HEADER_ContentEncoding, encoding) - c := new(CompressingResponseWriter) - c.writer = httpWriter - var err error - if ENCODING_GZIP == encoding { - w := currentCompressorProvider.AcquireGzipWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_GZIP - } else if ENCODING_DEFLATE == encoding { - w := currentCompressorProvider.AcquireZlibWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_DEFLATE - } else { - return nil, errors.New("Unknown encoding:" + encoding) - } - return c, err -} diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go deleted file mode 100644 index ee426010a2..0000000000 --- a/vendor/github.com/emicklei/go-restful/compressor_cache.go +++ /dev/null @@ -1,103 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount -// of writers and readers (resources). -// If a new resource is acquired and all are in use, it will return a new unmanaged resource. -type BoundedCachedCompressors struct { - gzipWriters chan *gzip.Writer - gzipReaders chan *gzip.Reader - zlibWriters chan *zlib.Writer - writersCapacity int - readersCapacity int -} - -// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a filled cache.
-func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { - b := &BoundedCachedCompressors{ - gzipWriters: make(chan *gzip.Writer, writersCapacity), - gzipReaders: make(chan *gzip.Reader, readersCapacity), - zlibWriters: make(chan *zlib.Writer, writersCapacity), - writersCapacity: writersCapacity, - readersCapacity: readersCapacity, - } - for ix := 0; ix < writersCapacity; ix++ { - b.gzipWriters <- newGzipWriter() - b.zlibWriters <- newZlibWriter() - } - for ix := 0; ix < readersCapacity; ix++ { - b.gzipReaders <- newGzipReader() - } - return b -} - -// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { - var writer *gzip.Writer - select { - case writer, _ = <-b.gzipWriters: - default: - // return a new unmanaged one - writer = newGzipWriter() - } - return writer -} - -// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { - // forget the unmanaged ones - if len(b.gzipWriters) < b.writersCapacity { - b.gzipWriters <- w - } -} - -// AcquireGzipReader returns a *gzip.Reader. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { - var reader *gzip.Reader - select { - case reader, _ = <-b.gzipReaders: - default: - // return a new unmanaged one - reader = newGzipReader() - } - return reader -} - -// ReleaseGzipReader accepts a reader (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { - // forget the unmanaged ones - if len(b.gzipReaders) < b.readersCapacity { - b.gzipReaders <- r - } -} - -// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { - var writer *zlib.Writer - select { - case writer, _ = <-b.zlibWriters: - default: - // return a new unmanaged one - writer = newZlibWriter() - } - return writer -} - -// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { - // forget the unmanaged ones - if len(b.zlibWriters) < b.writersCapacity { - b.zlibWriters <- w - } -} diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go deleted file mode 100644 index d866ce64bb..0000000000 --- a/vendor/github.com/emicklei/go-restful/compressor_pools.go +++ /dev/null @@ -1,91 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "sync" -) - -// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool. -type SyncPoolCompessors struct { - GzipWriterPool *sync.Pool - GzipReaderPool *sync.Pool - ZlibWriterPool *sync.Pool -} - -// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
-func NewSyncPoolCompessors() *SyncPoolCompessors { - return &SyncPoolCompessors{ - GzipWriterPool: &sync.Pool{ - New: func() interface{} { return newGzipWriter() }, - }, - GzipReaderPool: &sync.Pool{ - New: func() interface{} { return newGzipReader() }, - }, - ZlibWriterPool: &sync.Pool{ - New: func() interface{} { return newZlibWriter() }, - }, - } -} - -func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { - return s.GzipWriterPool.Get().(*gzip.Writer) -} - -func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { - s.GzipWriterPool.Put(w) -} - -func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { - return s.GzipReaderPool.Get().(*gzip.Reader) -} - -func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { - s.GzipReaderPool.Put(r) -} - -func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { - return s.ZlibWriterPool.Get().(*zlib.Writer) -} - -func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { - s.ZlibWriterPool.Put(w) -} - -func newGzipWriter() *gzip.Writer { - // create with an empty bytes writer; it will be replaced before using the gzipWriter - writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} - -func newGzipReader() *gzip.Reader { - // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader - // we can safely use currentCompressorProvider because it is set on package initialization. - w := currentCompressorProvider.AcquireGzipWriter() - defer currentCompressorProvider.ReleaseGzipWriter(w) - b := new(bytes.Buffer) - w.Reset(b) - w.Flush() - w.Close() - reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - panic(err.Error()) - } - return reader -} - -func newZlibWriter() *zlib.Writer { - writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go deleted file mode 100644 index f028456e0f..0000000000 --- a/vendor/github.com/emicklei/go-restful/compressors.go +++ /dev/null @@ -1,53 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -type CompressorProvider interface { - // Returns a *gzip.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireGzipWriter() *gzip.Writer - - // Releases an acquired *gzip.Writer. - ReleaseGzipWriter(w *gzip.Writer) - - // Returns a *gzip.Reader which needs to be released later. - AcquireGzipReader() *gzip.Reader - - // Releases an acquired *gzip.Reader. - ReleaseGzipReader(w *gzip.Reader) - - // Returns a *zlib.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireZlibWriter() *zlib.Writer - - // Releases an acquired *zlib.Writer. - ReleaseZlibWriter(w *zlib.Writer) -} - -// currentCompressorProvider is the actual provider of compressors (zlib or gzip). -var currentCompressorProvider CompressorProvider - -func init() { - currentCompressorProvider = NewSyncPoolCompessors() -} - -// CurrentCompressorProvider returns the current CompressorProvider. -// It is initialized using a SyncPoolCompessors.
-func CurrentCompressorProvider() CompressorProvider { - return currentCompressorProvider -} - -// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). -func SetCompressorProvider(p CompressorProvider) { - if p == nil { - panic("cannot set compressor provider to nil") - } - currentCompressorProvider = p -} diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go deleted file mode 100644 index 203439c5e5..0000000000 --- a/vendor/github.com/emicklei/go-restful/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default - - HEADER_Allow = "Allow" - HEADER_Accept = "Accept" - HEADER_Origin = "Origin" - HEADER_ContentType = "Content-Type" - HEADER_LastModified = "Last-Modified" - HEADER_AcceptEncoding = "Accept-Encoding" - HEADER_ContentEncoding = "Content-Encoding" - HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" - HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" - HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" - HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" - HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" - HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" - HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" - HEADER_AccessControlMaxAge = "Access-Control-Max-Age" - - ENCODING_GZIP = "gzip" - ENCODING_DEFLATE = "deflate" -) diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go deleted file mode 100644 index 4e53cccb93..0000000000 --- a/vendor/github.com/emicklei/go-restful/container.go +++ /dev/null @@ -1,361 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strings" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
-// The requests are further dispatched to routes of WebServices using a RouteSelector -type Container struct { - webServicesLock sync.RWMutex - webServices []*WebService - ServeMux *http.ServeMux - isRegisteredOnRoot bool - containerFilters []FilterFunction - doNotRecover bool // default is false - recoverHandleFunc RecoverHandleFunction - serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative - contentEncodingEnabled bool // default is false -} - -// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311) -func NewContainer() *Container { - return &Container{ - webServices: []*WebService{}, - ServeMux: http.NewServeMux(), - isRegisteredOnRoot: false, - containerFilters: []FilterFunction{}, - doNotRecover: false, - recoverHandleFunc: logStackOnRecover, - serviceErrorHandleFunc: writeServiceError, - router: RouterJSR311{}, - contentEncodingEnabled: false} -} - -// RecoverHandleFunction declares functions that can be used to handle a panic situation. -// The first argument is what recover() returns. The second must be used to communicate an error response. -type RecoverHandleFunction func(interface{}, http.ResponseWriter) - -// RecoverHandler changes the default function (logStackOnRecover) to be called -// when a panic is detected. DoNotRecover must have its default value (=false). -func (c *Container) RecoverHandler(handler RecoverHandleFunction) { - c.recoverHandleFunc = handler -} - -// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. -// The first argument is the service error, the second is the request that resulted in the error and -// the third must be used to communicate an error response. -type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) - -// ServiceErrorHandler changes the default function (writeServiceError) to be called -// when a ServiceError is detected. -func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { - c.serviceErrorHandleFunc = handler -} - -// DoNotRecover controls whether panics will be caught to return HTTP 500. -// If set to true, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -func (c *Container) DoNotRecover(doNot bool) { - c.doNotRecover = doNot -} - -// Router changes the default Router (currently RouterJSR311) -func (c *Container) Router(aRouter RouteSelector) { - c.router = aRouter -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. -func (c *Container) EnableContentEncoding(enabled bool) { - c.contentEncodingEnabled = enabled -} - -// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
-func (c *Container) Add(service *WebService) *Container { - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - - // if rootPath was not set then lazy initialize it - if len(service.rootPath) == 0 { - service.Path("/") - } - - // cannot have duplicate root paths - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - log.Printf("[restful] WebService with duplicate root path detected:['%v']", each) - os.Exit(1) - } - } - - // If not registered on root then add specific mapping - if !c.isRegisteredOnRoot { - c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) - } - c.webServices = append(c.webServices, service) - return c -} - -// addHandler may set a new HandleFunc for the serveMux -// this function must run inside the critical region protected by the webServicesLock. -// returns true if the function was registered on root ("/") -func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { - pattern := fixedPrefixPath(service.RootPath()) - // check if root path registration is needed - if "/" == pattern || "" == pattern { - serveMux.HandleFunc("/", c.dispatch) - return true - } - // detect if registration already exists - alreadyMapped := false - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - alreadyMapped = true - break - } - } - if !alreadyMapped { - serveMux.HandleFunc(pattern, c.dispatch) - if !strings.HasSuffix(pattern, "/") { - serveMux.HandleFunc(pattern+"/", c.dispatch) - } - } - return false -} - -func (c *Container) Remove(ws *WebService) error { - if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Printf(errMsg) - return errors.New(errMsg) - } - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - // build a new ServeMux and re-register all WebServices - newServeMux := http.NewServeMux() - newServices := []*WebService{} - newIsRegisteredOnRoot := false - for _, each := range c.webServices { - if each.rootPath != ws.rootPath { - // If not registered on root then add specific mapping - if !newIsRegisteredOnRoot { - newIsRegisteredOnRoot = c.addHandler(each, newServeMux) - } - newServices = append(newServices, each) - } - } - c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot - return nil -} - -// logStackOnRecover is the default RecoverHandleFunction and is called -// when DoNotRecover is false and the recoverHandleFunc is not set for the container. -// Default implementation logs the stacktrace and writes the stacktrace on the response. -// This may be a security issue as it exposes sourcecode information. -func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason)) - for i := 2; ; i += 1 { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) - } - log.Print(buffer.String()) - httpWriter.WriteHeader(http.StatusInternalServerError) - httpWriter.Write(buffer.Bytes()) -} - -// writeServiceError is the default ServiceErrorHandleFunction and is called -// when a ServiceError is returned during route selection. 
Default implementation -// calls resp.WriteErrorString(err.Code, err.Message) -func writeServiceError(err ServiceError, req *Request, resp *Response) { - resp.WriteErrorString(err.Code, err.Message) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - // Install panic recovery unless told otherwise - if !c.doNotRecover { // catch all for 500 response - defer func() { - if r := recover(); r != nil { - c.recoverHandleFunc(r, writer) - return - } - }() - } - // Install closing the request body (if any) - defer func() { - if nil != httpRequest.Body { - httpRequest.Body.Close() - } - }() - - // Detect if compression is needed - // assume without compression, test for override - if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("[restful] unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - // Find best match Route ; err is non-nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() - if err != nil { - // a non-200 response has already been written - // run container filters anyway ; they should not touch the response... - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - switch err.(type) { - case ServiceError: - ser := err.(ServiceError) - c.serviceErrorHandleFunc(ser, req, resp) - } - // TODO - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) - return - } - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest) - // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { - // compose filter chain - allFilters := []FilterFunction{} - allFilters = append(allFilters, c.containerFilters...) - allFilters = append(allFilters, webService.filters...) - allFilters = append(allFilters, route.Filters...) - chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // no filters, handle request by route - route.Function(wrappedRequest, wrappedResponse) - } -} - -// fixedPrefixPath returns the fixed part of the pathspec ; it may include template vars {} -func fixedPrefixPath(pathspec string) string { - varBegin := strings.Index(pathspec, "{") - if -1 == varBegin { - return pathspec - } - return pathspec[:varBegin] -} - -// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) -} - -// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
-func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) -} - -// HandleWithFilter registers the handler for the given pattern. -// Container's filter chain is applied for handler. -// If a handler already exists for pattern, HandleWithFilter panics. -func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { - f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { - if len(c.containerFilters) == 0 { - handler.ServeHTTP(httpResponse, httpRequest) - return - } - - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) - } - - c.Handle(pattern, http.HandlerFunc(f)) -} - -// Filter appends a container FilterFunction. These are called before dispatching -// a http.Request to a WebService from the container -func (c *Container) Filter(filter FilterFunction) { - c.containerFilters = append(c.containerFilters, filter) -} - -// RegisteredWebServices returns the collections of added WebServices -func (c *Container) RegisteredWebServices() []*WebService { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - result := make([]*WebService, len(c.webServices)) - for ix := range c.webServices { - result[ix] = c.webServices[ix] - } - return result -} - -// computeAllowedMethods returns a list of HTTP methods that are valid for a Request -func (c *Container) computeAllowedMethods(req *Request) []string { - // Go through all RegisteredWebServices() and all their Routes to collect the options - methods := []string{} - requestPath := req.Request.URL.Path - for _, ws := range c.RegisteredWebServices() { - matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - finalMatch := matches[len(matches)-1] - for _, rt := range ws.Routes() { - matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) - if matches != nil { - lastMatch := matches[len(matches)-1] - if lastMatch == "" || lastMatch == "/" { // include only when the remaining match is empty or '/' - methods = append(methods, rt.Method) - } - } - } - } - } - // methods = append(methods, "OPTIONS") not sure about this - return methods -} - -// newBasicRequestResponse creates a pair of Request,Response from its http versions. -// It is basic because no parameter or (produces) content-type information is given. -func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - resp := NewResponse(httpWriter) - resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) - return NewRequest(httpRequest), resp -} diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go deleted file mode 100644 index 1efeef072d..0000000000 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ /dev/null @@ -1,202 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "regexp" - "strconv" - "strings" -) - -// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. -// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page -// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
-// -// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing -// http://enable-cors.org/server.html -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. - AllowedMethods []string - MaxAge int // number of seconds before requiring new Options request - CookiesAllowed bool - Container *Container - - allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. -} - -// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html -// and http://www.html5rocks.com/static/images/cors_server_flowchart.png -func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { - origin := req.Request.Header.Get(HEADER_Origin) - if len(origin) == 0 { - if trace { - traceLogger.Print("no Http header Origin set") - } - chain.ProcessFilter(req, resp) - return - } - if !c.isOriginAllowed(origin) { // check whether this origin is allowed - if trace { - traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) - } - chain.ProcessFilter(req, resp) - return - } - if req.Request.Method != "OPTIONS" { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } - if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { - c.doPreflightRequest(req, resp) - } else { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } -} - -func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { - c.setOptionsHeaders(req, resp) - // continue processing the response -} - -func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { - if len(c.AllowedMethods) == 0 { - if c.Container == nil { - c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) - } else { - c.AllowedMethods = c.Container.computeAllowedMethods(req) - } - } - - acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) - if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestMethod, - acrm, - c.AllowedMethods) - } - return - } - acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - if len(acrhs) > 0 { - for _, each := range strings.Split(acrhs, ",") { - if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestHeaders, - acrhs, - c.AllowedHeaders) - } - return - } - } - } - resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) - resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) - c.setOptionsHeaders(req, resp) - - // return http 200 response, no body -} - -func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { - c.checkAndSetExposeHeaders(resp) - c.setAllowOriginHeader(req, resp) - c.checkAndSetAllowCredentials(resp) - if c.MaxAge > 0 { - resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) - } -} - -func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { - if len(origin) == 0 { - return 
false - } - if len(c.AllowedDomains) == 0 { - return true - } - - allowed := false - for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break - } - } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } - } - - return allowed -} - -func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { - origin := req.Request.Header.Get(HEADER_Origin) - if c.isOriginAllowed(origin) { - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - } -} - -func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { - if len(c.ExposeHeaders) > 0 { - resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) - } -} - -func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { - if c.CookiesAllowed { - resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") - } -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { - for _, each := range allowedMethods { - if each == method { - return true - } - } - return false -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { - for _, each := range c.AllowedHeaders { - if strings.ToLower(each) == strings.ToLower(header) { - return true - } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err - } - regexps = append(regexps, r) - } - return regexps, nil -} diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh deleted file mode 100644 index e27dbf1a91..0000000000 --- a/vendor/github.com/emicklei/go-restful/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go deleted file mode 100644 index 185300dbc7..0000000000 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ /dev/null @@ -1,162 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "regexp" - "sort" - "strings" -) - -// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. -type CurlyRouter struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (c CurlyRouter) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { - - requestTokens := tokenizePath(httpRequest.URL.Path) - - detectedService := c.detectWebService(requestTokens, webServices) - if detectedService == nil { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) - } - return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - candidateRoutes := c.selectRoutes(detectedService, requestTokens) - if len(candidateRoutes) == 0 { - if trace { - traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) - } - return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) - if selectedRoute == nil { - return detectedService, nil, err - } - return detectedService, selectedRoute, nil -} - -// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request. -func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := sortableCurlyRoutes{} - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) - if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? - } - } - sort.Sort(sort.Reverse(candidates)) - return candidates -} - -// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and the number of static path elements. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { - if len(routeTokens) < len(requestTokens) { - // proceed in matching only if last routeToken is wildcard - count := len(routeTokens) - if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { - return false, 0, 0 - } - // proceed - } - for i, routeToken := range routeTokens { - if i == len(requestTokens) { - // reached end of request path - return false, 0, 0 - } - requestToken := requestTokens[i] - if strings.HasPrefix(routeToken, "{") { - paramCount++ - if colon := strings.Index(routeToken, ":"); colon != -1 { - // match by regex - matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) - if !matchesToken { - return false, 0, 0 - } - if matchesRemainder { - break - } - } - } else { // no { prefix - if requestToken != routeToken { - return false, 0, 0 - } - staticCount++ - } - } - return true, paramCount, staticCount -} - -// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens -// format routeToken is {someVar:someExpression}, e.g.
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} -func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { - regPart := routeToken[colon+1 : len(routeToken)-1] - if regPart == "*" { - if trace { - traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) - } - return true, true - } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false -} - -// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type -// headers of the Request. See also RouterJSR311 in jsr311.go -func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { - // tracing is done inside detectRoute - return RouterJSR311{}.detectRoute(candidateRoutes.routes(), httpRequest) -} - -// detectWebService returns the best matching webService given the list of path tokens. -// see also computeWebserviceScore -func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService - score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) - if matches && (eachScore > score) { - best = each - score = eachScore - } - } - return best -} - -// computeWebserviceScore returns whether tokens match and -// the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { - return false, 0 - } - score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { - score++ - continue - } - if len(other) > 0 && strings.HasPrefix(other, "{") { - // no empty match - if len(each) == 0 { - return false, score - } - score += 1 - } else { - // not a parameter - if each != other { - return false, score - } - score += (len(tokens) - i) * 10 //fuzzy - } - } - return true, score -} diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go deleted file mode 100644 index 296f94650e..0000000000 --- a/vendor/github.com/emicklei/go-restful/curly_route.go +++ /dev/null @@ -1,52 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
-type curlyRoute struct { - route Route - paramCount int - staticCount int -} - -type sortableCurlyRoutes []curlyRoute - -func (s *sortableCurlyRoutes) add(route curlyRoute) { - *s = append(*s, route) -} - -func (s sortableCurlyRoutes) routes() (routes []Route) { - for _, each := range s { - routes = append(routes, each.route) // TODO change return type - } - return routes -} - -func (s sortableCurlyRoutes) Len() int { - return len(s) -} -func (s sortableCurlyRoutes) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortableCurlyRoutes) Less(i, j int) bool { - ci := s[i] - cj := s[j] - - // primary key - if ci.staticCount < cj.staticCount { - return true - } - if ci.staticCount > cj.staticCount { - return false - } - // secondary key - if ci.paramCount < cj.paramCount { - return true - } - if ci.paramCount > cj.paramCount { - return false - } - return ci.route.Path < cj.route.Path -} diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go deleted file mode 100644 index d40405bf76..0000000000 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Package restful, a lean package for creating REST-style WebServices without magic. - -WebServices and Routes - -A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls. -Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. -WebServices must be added to a container (see below) in order to handle Http requests from a server. - -A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). -This package has the logic to find the best matching Route and if found, call its Function. - - ws := new(restful.WebService) - ws. - Path("/users"). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON, restful.MIME_XML) - - ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource - - ... - - // GET http://localhost:8080/users/1 - func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... - } - -The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. - -Regular expression matching Routes - -A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. -For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. -Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) -This feature requires the use of a CurlyRouter. - -Containers - -A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. -Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. -The Default container of go-restful uses the http.DefaultServeMux. -You can create your own Container and create a new http.Server for that particular container.
- - container := restful.NewContainer() - server := &http.Server{Addr: ":8081", Handler: container} - -Filters - -A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. -You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. -In the restful package there are three hooks into the request,response flow where filters can be added. -Each filter must define a FilterFunction: - - func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) - -Use the following statement to pass the request,response pair to the next filter or RouteFunction - - chain.ProcessFilter(req, resp) - -Container Filters - -These are processed before any registered WebService. - - // install a (global) filter for the default container (processed before any webservice) - restful.Filter(globalLogging) - -WebService Filters - -These are processed before any Route of a WebService. - - // install a webservice filter (processed before any route) - ws.Filter(webserviceLogging).Filter(measureTime) - - -Route Filters - -These are processed before calling the function associated with the Route. - - // install 2 chained route filters (processed before calling findUser) - ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. - -Response Encoding - -Two encodings are supported: gzip and deflate. To enable this for all responses: - - restful.DefaultContainer.EnableContentEncoding(true) - -If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. -Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go - -OPTIONS support - -By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. - - Filter(OPTIONSFilter()) - -CORS - -By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. - - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) - -Error Handling - -Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. -For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. - - 400: Bad Request - -If path or query parameters are not valid (content or type) then use http.StatusBadRequest. - - 404: Not Found - -Despite a valid URI, the resource requested may not be available - - 500: Internal Server Error - -If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. - - 405: Method Not Allowed - -The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. - - 406: Not Acceptable - -The request does not have or has an unknown Accept Header set for this operation. - - 415: Unsupported Media Type - -The request does not have or has an unknown Content-Type Header set for this operation. 
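[Editor's note] To make the status-code guidance above concrete, a minimal sketch of a route function that reports 400 through the Response API; the `/users` service, the `limit` query parameter, and the handler are illustrative, not part of the package:

```go
package main

import (
	"net/http"
	"strconv"

	restful "github.com/emicklei/go-restful"
)

// listUsers writes http.StatusBadRequest when the query parameter is invalid.
func listUsers(req *restful.Request, resp *restful.Response) {
	limit, err := strconv.Atoi(req.QueryParameter("limit"))
	if err != nil || limit < 1 {
		resp.WriteErrorString(http.StatusBadRequest, "limit must be a positive integer")
		return
	}
	resp.WriteEntity([]string{"alice", "bob"}) // placeholder payload
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/").To(listUsers))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```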
- -ServiceError - -In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. - -Performance options - -This package has several options that affect the performance of your service. It is important to understand them and how you can change them. - - restful.DefaultContainer.Router(CurlyRouter{}) - -The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html). -However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time. -The CurlyRouter implementation is a more lightweight alternative that also allows you to use wildcards and expressions, but only where needed. - - restful.DefaultContainer.DoNotRecover(true) - -DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to true, Route functions are responsible for handling any error situation. -Default value is false; it will recover from panics. This has performance implications. - - restful.SetCacheReadEntity(false) - -SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable. -If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false. - - restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) - -If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. -Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation. - -Troubleshooting - -This package has the means to produce detailed logging of the complete Http request matching process and filter invocation. -Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. a log.Logger instance), such as: - - restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) - -Logging - -The restful.SetLogger() method allows you to override the logger used by the package. By default restful -uses the standard library `log` package and logs to stdout. Different logging packages are supported as -long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your -preferred package is simple. - -Resources - -[project]: https://github.com/emicklei/go-restful - -[examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - -[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License -*/ -package restful diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go deleted file mode 100644 index 6ecf6c7f89..0000000000 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ /dev/null @@ -1,163 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "encoding/json" - "encoding/xml" - "strings" - "sync" -) - -// EntityReaderWriter can read and write values using an encoding such as JSON,XML. -type EntityReaderWriter interface { - // Read a serialized version of the value from the request. - // The Request may have a decompressing reader. Depends on Content-Encoding.
- Read(req *Request, v interface{}) error - - // Write a serialized version of the value on the response. - // The Response may have a compressing writer. Depends on Accept-Encoding. - // status should be a valid Http Status code - Write(resp *Response, status int, v interface{}) error -} - -// entityAccessRegistry is a singleton -var entityAccessRegistry = &entityReaderWriters{ - protection: new(sync.RWMutex), - accessors: map[string]EntityReaderWriter{}, -} - -// entityReaderWriters associates MIME to an EntityReaderWriter -type entityReaderWriters struct { - protection *sync.RWMutex - accessors map[string]EntityReaderWriter -} - -func init() { - RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) - RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) -} - -// RegisterEntityAccessor adds/overrides the ReaderWriter for encoding content with this MIME type. -func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { - entityAccessRegistry.protection.Lock() - defer entityAccessRegistry.protection.Unlock() - entityAccessRegistry.accessors[mime] = erw -} - -// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. -// This package is already initialized with such an accessor using the MIME_JSON contentType. -func NewEntityAccessorJSON(contentType string) EntityReaderWriter { - return entityJSONAccess{ContentType: contentType} -} - -// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. -// This package is already initialized with such an accessor using the MIME_XML contentType. -func NewEntityAccessorXML(contentType string) EntityReaderWriter { - return entityXMLAccess{ContentType: contentType} -} - -// accessorAt returns the registered ReaderWriter for this MIME type. -func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { - r.protection.RLock() - defer r.protection.RUnlock() - er, ok := r.accessors[mime] - if !ok { - // retry with reverse lookup - // more expensive but we are in an exceptional situation anyway - for k, v := range r.accessors { - if strings.Contains(mime, k) { - return v, true - } - } - } - return er, ok -} - -// entityXMLAccess is an EntityReaderWriter for XML encoding -type entityXMLAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshals the value from XML -func (e entityXMLAccess) Read(req *Request, v interface{}) error { - return xml.NewDecoder(req.Request.Body).Decode(v) -} - -// Write marshals the value to XML and sets the Content-Type header. -func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { - return writeXML(resp, status, e.ContentType, v) -} - -// writeXML marshals the value to XML and sets the Content-Type header.
-func writeXML(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write([]byte(xml.Header)) - if err != nil { - return err - } - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return xml.NewEncoder(resp).Encode(v) -} - -// entityJSONAccess is an EntityReaderWriter for JSON encoding -type entityJSONAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshals the value from JSON -func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) -} - -// Write marshals the value to JSON and sets the Content-Type Header. -func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { - return writeJSON(resp, status, e.ContentType, v) -} - -// writeJSON marshals the value to JSON and sets the Content-Type Header. -func writeJSON(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) -} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go deleted file mode 100644 index 4b86656e17..0000000000 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ /dev/null @@ -1,26 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. -type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters -} - -// ProcessFilter passes the request/response pair on to the next of the Filters. -// Each filter can decide to proceed to the next Filter or handle the Response itself.
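For illustration, a FilterFunction (its type is defined just below) must either call chain.ProcessFilter to pass control on, or handle the Response itself. A sketch of a hypothetical logging filter, assuming the package-level Filter helper named at the end of this diff:

    package main

    import (
        "log"

        restful "github.com/emicklei/go-restful"
    )

    // logFilter is a hypothetical FilterFunction: it logs the call, then passes control on.
    func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
        log.Printf("[restful] %s %s", req.Request.Method, req.Request.URL)
        chain.ProcessFilter(req, resp) // next filter, or the Target RouteFunction if this was the last one
    }

    func main() {
        restful.Filter(logFilter) // install container-wide on the DefaultContainer
    }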
-func (f *FilterChain) ProcessFilter(request *Request, response *Response) { - if f.Index < len(f.Filters) { - f.Index++ - f.Filters[f.Index-1](request, response, f) - } else { - f.Target(request, response) - } -} - -// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction -type FilterFunction func(*Request, *Response, *FilterChain) diff --git a/vendor/github.com/emicklei/go-restful/install.sh b/vendor/github.com/emicklei/go-restful/install.sh deleted file mode 100644 index 36cbf25f82..0000000000 --- a/vendor/github.com/emicklei/go-restful/install.sh +++ /dev/null @@ -1,10 +0,0 @@ -go test -test.v ...restful && \ -go test -test.v ...swagger && \ -go vet ...restful && \ -go fmt ...swagger && \ -go install ...swagger && \ -go fmt ...restful && \ -go install ...restful -cd examples - ls *.go | xargs -I {} go build -o /tmp/ignore {} - cd .. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go deleted file mode 100644 index 511444ac68..0000000000 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ /dev/null @@ -1,248 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "fmt" - "net/http" - "sort" -) - -// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) -// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. -// RouterJSR311 implements the Router interface. -// Concept of locators is not implemented. -type RouterJSR311 struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (r RouterJSR311) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { - - // Identify the root resource class (WebService) - dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) - if err != nil { - return nil, nil, NewError(http.StatusNotFound, "") - } - // Obtain the set of candidate methods (Routes) - routes := r.selectRoutes(dispatcher, finalMatch) - if len(routes) == 0 { - return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - - // Identify the method (Route) that will handle the request - route, ok := r.detectRoute(routes, httpRequest) - return dispatcher, route, ok -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - // http method - methodOk := []Route{} - for _, each := range routes { - if httpRequest.Method == each.Method { - methodOk = append(methodOk, each) - } - } - if len(methodOk) == 0 { - if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method) - } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") - } - inputMediaOk := methodOk - - // content-type - contentType := httpRequest.Header.Get(HEADER_ContentType) - inputMediaOk = []Route{} - for _, each := range methodOk { - if each.matchesContentType(contentType) { - inputMediaOk = append(inputMediaOk, each) - } - } - if len(inputMediaOk) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType) - } - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } - - // accept - outputMediaOk := []Route{} - accept := httpRequest.Header.Get(HEADER_Accept) - if len(accept) == 0 { - accept = "*/*" - } - for _, each := range inputMediaOk { - if each.matchesAccept(accept) { - outputMediaOk = append(outputMediaOk, each) - } - } - if len(outputMediaOk) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept) - } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") - } - // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return &outputMediaOk[0], nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -// n/m > n/* > */* -func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { - // TODO - return &routes[0] -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) -func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { - filtered := &sortableRouteCandidates{} - for _, each := range dispatcher.Routes() { - pathExpr := each.pathExpr - matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) - if matches != nil { - lastMatch := matches[len(matches)-1] - if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. 
- filtered.candidates = append(filtered.candidates, - routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) - } - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) - } - return []Route{} - } - sort.Sort(sort.Reverse(filtered)) - - // select other routes from candidates whoes expression matches rmatch - matchingRoutes := []Route{filtered.candidates[0].route} - for c := 1; c < len(filtered.candidates); c++ { - each := filtered.candidates[c] - if each.route.pathExpr.Matcher.MatchString(pathRemainder) { - matchingRoutes = append(matchingRoutes, each.route) - } - } - return matchingRoutes -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) -func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { - filtered := &sortableDispatcherCandidates{} - for _, each := range dispatchers { - matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - filtered.candidates = append(filtered.candidates, - dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) - } - return nil, "", errors.New("not found") - } - sort.Sort(sort.Reverse(filtered)) - return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil -} - -// Types and functions to support the sorting of Routes - -type routeCandidate struct { - route Route - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} - -func (r routeCandidate) expressionToMatch() string { - return r.route.pathExpr.Source -} - -func (r routeCandidate) String() string { - return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) -} - -type sortableRouteCandidates struct { - candidates []routeCandidate -} - -func (rcs *sortableRouteCandidates) Len() int { - return len(rcs.candidates) -} -func (rcs *sortableRouteCandidates) Swap(i, j int) { - rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] -} -func (rcs *sortableRouteCandidates) Less(i, j int) bool { - ci := rcs.candidates[i] - cj := rcs.candidates[j] - // primary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // secundary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // tertiary key - if ci.nonDefaultCount < cj.nonDefaultCount { - return true - } - if ci.nonDefaultCount > cj.nonDefaultCount { - return false - } - // quaternary key ("source" is interpreted as Path) - return ci.route.Path < cj.route.Path -} - -// Types and functions to support the sorting of Dispatchers - -type dispatcherCandidate struct { - dispatcher *WebService - finalMatch string - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) -} -type sortableDispatcherCandidates struct { - candidates []dispatcherCandidate -} - -func (dc *sortableDispatcherCandidates) Len() int { - return len(dc.candidates) -} -func (dc *sortableDispatcherCandidates) Swap(i, j int) { - dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] -} -func (dc *sortableDispatcherCandidates) Less(i, j int) bool { - ci := dc.candidates[i] - cj := dc.candidates[j] - // primary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // secundary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // tertiary key - return ci.nonDefaultCount < cj.nonDefaultCount -} diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go deleted file mode 100644 index f70d89524a..0000000000 --- a/vendor/github.com/emicklei/go-restful/log/log.go +++ /dev/null @@ -1,31 +0,0 @@ -package log - -import ( - stdlog "log" - "os" -) - -// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -var Logger StdLogger - -func init() { - // default Logger - SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) -} - -func SetLogger(customLogger StdLogger) { - Logger = customLogger -} - -func Print(v ...interface{}) { - Logger.Print(v...) -} - -func Printf(format string, v ...interface{}) { - Logger.Printf(format, v...) -} diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go deleted file mode 100644 index 3f1c4db86b..0000000000 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2014 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. -import ( - "github.com/emicklei/go-restful/log" -) - -var trace bool = false -var traceLogger log.StdLogger - -func init() { - traceLogger = log.Logger // use the package logger by default -} - -// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. -// You may call EnableTracing() directly to enable trace logging to the package-wide logger. -func TraceLogger(logger log.StdLogger) { - traceLogger = logger - EnableTracing(logger != nil) -} - -// expose the setter for the global logger on the top-level package -func SetLogger(customLogger log.StdLogger) { - log.SetLogger(customLogger) -} - -// EnableTracing can be used to Trace logging on and off. -func EnableTracing(enabled bool) { - trace = enabled -} diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go deleted file mode 100644 index d7ea2b6157..0000000000 --- a/vendor/github.com/emicklei/go-restful/mime.go +++ /dev/null @@ -1,45 +0,0 @@ -package restful - -import ( - "strconv" - "strings" -) - -type mime struct { - media string - quality float64 -} - -// insertMime adds a mime to a list and keeps it sorted by quality. -func insertMime(l []mime, e mime) []mime { - for i, each := range l { - // if current mime has lower quality then insert before - if e.quality > each.quality { - left := append([]mime{}, l[0:i]...) - return append(append(left, e), l[i:]...) - } - } - return append(l, e) -} - -// sortedMimes returns a list of mime sorted (desc) by its specified quality. -func sortedMimes(accept string) (sorted []mime) { - for _, each := range strings.Split(accept, ",") { - typeAndQuality := strings.Split(strings.Trim(each, " "), ";") - if len(typeAndQuality) == 1 { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } else { - // take factor - parts := strings.Split(typeAndQuality[1], "=") - if len(parts) == 2 { - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - traceLogger.Printf("unable to parse quality in %s, %v", each, err) - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], f}) - } - } - } - } - return -} diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go deleted file mode 100644 index 4514eadcfa..0000000000 --- a/vendor/github.com/emicklei/go-restful/options_filter.go +++ /dev/null @@ -1,26 +0,0 @@ -package restful - -import "strings" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// As for any filter, you can also install it for a particular WebService within a Container. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { - if "OPTIONS" != req.Request.Method { - chain.ProcessFilter(req, resp) - return - } - resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ",")) -} - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. 
-// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func OPTIONSFilter() FilterFunction { - return DefaultContainer.OPTIONSFilter -} diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go deleted file mode 100644 index e11c8162a7..0000000000 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ /dev/null @@ -1,114 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - // PathParameterKind = indicator of Request parameter type "path" - PathParameterKind = iota - - // QueryParameterKind = indicator of Request parameter type "query" - QueryParameterKind - - // BodyParameterKind = indicator of Request parameter type "body" - BodyParameterKind - - // HeaderParameterKind = indicator of Request parameter type "header" - HeaderParameterKind - - // FormParameterKind = indicator of Request parameter type "form" - FormParameterKind -) - -// Parameter is for documententing the parameter used in a Http Request -// ParameterData kinds are Path,Query and Body -type Parameter struct { - data *ParameterData -} - -// ParameterData represents the state of a Parameter. -// It is made public to make it accessible to e.g. the Swagger package. -type ParameterData struct { - Name, Description, DataType, DataFormat string - Kind int - Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string -} - -// Data returns the state of the Parameter -func (p *Parameter) Data() ParameterData { - return *p.data -} - -// Kind returns the parameter type indicator (see const for valid values) -func (p *Parameter) Kind() int { - return p.data.Kind -} - -func (p *Parameter) bePath() *Parameter { - p.data.Kind = PathParameterKind - return p -} -func (p *Parameter) beQuery() *Parameter { - p.data.Kind = QueryParameterKind - return p -} -func (p *Parameter) beBody() *Parameter { - p.data.Kind = BodyParameterKind - return p -} - -func (p *Parameter) beHeader() *Parameter { - p.data.Kind = HeaderParameterKind - return p -} - -func (p *Parameter) beForm() *Parameter { - p.data.Kind = FormParameterKind - return p -} - -// Required sets the required field and returns the receiver -func (p *Parameter) Required(required bool) *Parameter { - p.data.Required = required - return p -} - -// AllowMultiple sets the allowMultiple field and returns the receiver -func (p *Parameter) AllowMultiple(multiple bool) *Parameter { - p.data.AllowMultiple = multiple - return p -} - -// AllowableValues sets the allowableValues field and returns the receiver -func (p *Parameter) AllowableValues(values map[string]string) *Parameter { - p.data.AllowableValues = values - return p -} - -// DataType sets the dataType field and returns the receiver -func (p *Parameter) DataType(typeName string) *Parameter { - p.data.DataType = typeName - return p -} - -// DataFormat sets the dataFormat field for Swagger UI -func (p *Parameter) DataFormat(formatName string) *Parameter { - p.data.DataFormat = formatName - return p -} - -// DefaultValue sets the default value field and returns the receiver -func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { - p.data.DefaultValue = stringRepresentation - return p -} - -// Description sets the description value field and returns the receiver -func (p *Parameter) Description(doc string) *Parameter { - p.data.Description = doc - return p -} diff 
--git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go deleted file mode 100644 index a921e6f224..0000000000 --- a/vendor/github.com/emicklei/go-restful/path_expression.go +++ /dev/null @@ -1,69 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -// PathExpression holds a compiled path expression (RegExp) needed to match against -// Http request paths and to extract path parameter values. -type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarCount int // the number of named parameters (enclosed by {}) in the path - Matcher *regexp.Regexp - Source string // Path as defined by the RouteBuilder - tokens []string -} - -// NewPathExpression creates a PathExpression from the input URL path. -// Returns an error if the path is invalid. -func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varCount, tokens := templateToRegularExpression(path) - compiled, err := regexp.Compile(expression) - if err != nil { - return nil, err - } - return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) { - var buffer bytes.Buffer - buffer.WriteString("^") - //tokens = strings.Split(template, "/") - tokens = tokenizePath(template) - for _, each := range tokens { - if each == "" { - continue - } - buffer.WriteString("/") - if strings.HasPrefix(each, "{") { - // check for regular expression in variable - colon := strings.Index(each, ":") - if colon != -1 { - // extract expression - paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) - if paramExpr == "*" { // special case - buffer.WriteString("(.*)") - } else { - buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache - } - } else { - // plain var - buffer.WriteString("([^/]+?)") - } - varCount += 1 - } else { - literalCount += len(each) - encoded := each // TODO URI encode - buffer.WriteString(regexp.QuoteMeta(encoded)) - } - } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens -} diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go deleted file mode 100644 index 3e4234697d..0000000000 --- a/vendor/github.com/emicklei/go-restful/request.go +++ /dev/null @@ -1,131 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/zlib" - "io/ioutil" - "net/http" -) - -var defaultRequestContentType string - -var doCacheReadEntityBytes = true - -// Request is a wrapper for a http Request that provides convenience methods -type Request struct { - Request *http.Request - bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. 
/meetings/{id}/attendees -} - -func NewRequest(httpRequest *http.Request) *Request { - return &Request{ - Request: httpRequest, - pathParameters: map[string]string{}, - attributes: map[string]interface{}{}, - } // empty parameters, attributes -} - -// If ContentType is missing or */* is given then fall back to this type, otherwise -// a "Unable to unmarshal content of type:" response is returned. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) -func DefaultRequestContentType(mime string) { - defaultRequestContentType = mime -} - -// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable. -// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it. -func SetCacheReadEntity(doCache bool) { - doCacheReadEntityBytes = doCache -} - -// PathParameter accesses the Path parameter value by its name -func (r *Request) PathParameter(name string) string { - return r.pathParameters[name] -} - -// PathParameters accesses the Path parameter values -func (r *Request) PathParameters() map[string]string { - return r.pathParameters -} - -// QueryParameter returns the (first) Query parameter value by its name -func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) -} - -// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. -func (r *Request) BodyParameter(name string) (string, error) { - err := r.Request.ParseForm() - if err != nil { - return "", err - } - return r.Request.PostFormValue(name), nil -} - -// HeaderParameter returns the HTTP Header value of a Header name or empty if missing -func (r *Request) HeaderParameter(name string) string { - return r.Request.Header.Get(name) -} - -// ReadEntity checks the Accept header and reads the content into the entityPointer. -func (r *Request) ReadEntity(entityPointer interface{}) (err error) { - contentType := r.Request.Header.Get(HEADER_ContentType) - contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) - - // OLD feature, cache the body for reads - if doCacheReadEntityBytes { - if r.bodyContent == nil { - data, err := ioutil.ReadAll(r.Request.Body) - if err != nil { - return err - } - r.bodyContent = &data - } - r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent)) - } - - // check if the request body needs decompression - if ENCODING_GZIP == contentEncoding { - gzipReader := currentCompressorProvider.AcquireGzipReader() - defer currentCompressorProvider.ReleaseGzipReader(gzipReader) - gzipReader.Reset(r.Request.Body) - r.Request.Body = gzipReader - } else if ENCODING_DEFLATE == contentEncoding { - zlibReader, err := zlib.NewReader(r.Request.Body) - if err != nil { - return err - } - r.Request.Body = zlibReader - } - - // lookup the EntityReader - entityReader, ok := entityAccessRegistry.accessorAt(contentType) - if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) - } - return entityReader.Read(r, entityPointer) -} - -// SetAttribute adds or replaces the attribute with the given value. -func (r *Request) SetAttribute(name string, value interface{}) { - r.attributes[name] = value -} - -// Attribute returns the value associated to the given name. Returns nil if absent. 
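A sketch of how the Request helpers above are typically used together; putUser, user and the /users route are hypothetical, and WriteError/WriteHeaderAndEntity are defined later in this diff (response.go):

    package main

    import (
        "net/http"

        restful "github.com/emicklei/go-restful"
    )

    // user is a hypothetical payload type.
    type user struct {
        Name string `json:"name"`
    }

    // putUser sketches a RouteFunction built from the Request helpers above.
    func putUser(req *restful.Request, resp *restful.Response) {
        id := req.PathParameter("id") // value extracted from the matched {id} segment
        u := new(user)
        if err := req.ReadEntity(u); err != nil { // decoded according to Content-Type
            resp.WriteError(http.StatusBadRequest, err)
            return
        }
        req.SetAttribute("audit-id", id) // request-scoped value, readable by later filters
        resp.WriteHeaderAndEntity(http.StatusOK, u)
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/users").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
        ws.Route(ws.PUT("/{id}").To(putUser))
        restful.Add(ws)
    }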
-func (r Request) Attribute(name string) interface{} { - return r.attributes[name] -} - -// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees -func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath -} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go deleted file mode 100644 index 971cd0b42c..0000000000 --- a/vendor/github.com/emicklei/go-restful/response.go +++ /dev/null @@ -1,235 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "net/http" -) - -// DEPRECATED, use DefaultResponseContentType(mime) -var DefaultResponseMimeType string - -//PrettyPrintResponses controls the indentation feature of XML and JSON serialization -var PrettyPrintResponses = true - -// Response is a wrapper on the actual http ResponseWriter -// It provides several convenience methods to prepare and write response content. -type Response struct { - http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called -} - -// Creates a new response based on a http ResponseWriter. -func NewResponse(httpWriter http.ResponseWriter) *Response { - return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types -} - -// If Accept header matching fails, fall back to this type. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultResponseContentType(restful.MIME_JSON) -func DefaultResponseContentType(mime string) { - DefaultResponseMimeType = mime -} - -// InternalServerError writes the StatusInternalServerError header. -// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) -func (r Response) InternalServerError() Response { - r.WriteHeader(http.StatusInternalServerError) - return r -} - -// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. -func (r *Response) PrettyPrint(bePretty bool) { - r.prettyPrint = bePretty -} - -// AddHeader is a shortcut for .Header().Add(header,value) -func (r Response) AddHeader(header string, value string) Response { - r.Header().Add(header, value) - return r -} - -// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. -func (r *Response) SetRequestAccepts(mime string) { - r.requestAccept = mime -} - -// EntityWriter returns the registered EntityWriter that the entity (requested resource) -// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. -// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. 
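For completeness, the request-side and response-side content-type fallbacks (DefaultRequestContentType appears in request.go above, DefaultResponseContentType just here) are usually configured together; a short sketch using only calls shown in this diff:

    restful.DefaultRequestContentType(restful.MIME_JSON)  // when Content-Type is missing or */*
    restful.DefaultResponseContentType(restful.MIME_JSON) // when Accept header matching fails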
-func (r *Response) EntityWriter() (EntityReaderWriter, bool) { - sorted := sortedMimes(r.requestAccept) - for _, eachAccept := range sorted { - for _, eachProduce := range r.routeProduces { - if eachProduce == eachAccept.media { - if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { - return w, true - } - } - } - if eachAccept.media == "*/*" { - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - } - } - // if requestAccept is empty - writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) - if !ok { - // if not registered then fallback to the defaults (if set) - if DefaultResponseMimeType == MIME_JSON { - return entityAccessRegistry.accessorAt(MIME_JSON) - } - if DefaultResponseMimeType == MIME_XML { - return entityAccessRegistry.accessorAt(MIME_XML) - } - // Fallback to whatever the route says it can produce. - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - if trace { - traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) - } - } - return writer, ok -} - -// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) -func (r *Response) WriteEntity(value interface{}) error { - return r.WriteHeaderAndEntity(http.StatusOK, value) -} - -// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. -// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. -// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. -// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. -// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. -// Current implementation ignores any q-parameters in the Accept Header. -// Returns an error if the value could not be written on the response. -func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { - writer, ok := r.EntityWriter() - if !ok { - r.WriteHeader(http.StatusNotAcceptable) - return nil - } - return writer.Write(r, status, value) -} - -// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsXml(value interface{}) error { - return writeXML(r, http.StatusOK, MIME_XML, value) -} - -// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { - return writeXML(r, status, MIME_XML, value) -} - -// WriteAsJson is a convenience method for writing a value in json. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. 
-func (r *Response) WriteAsJson(value interface{}) error { - return writeJSON(r, http.StatusOK, MIME_JSON, value) -} - -// WriteJson is a convenience method for writing a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteJson(value interface{}, contentType string) error { - return writeJSON(r, http.StatusOK, contentType, value) -} - -// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { - return writeJSON(r, status, contentType, value) -} - -// WriteError write the http status and the error string on the response. -func (r *Response) WriteError(httpStatus int, err error) error { - r.err = err - return r.WriteErrorString(httpStatus, err.Error()) -} - -// WriteServiceError is a convenience method for a responding with a status and a ServiceError -func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { - r.err = err - return r.WriteHeaderAndEntity(httpStatus, err) -} - -// WriteErrorString is a convenience method for an error status with the actual error -func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { - if r.err == nil { - // if not called from WriteError - r.err = errors.New(errorReason) - } - r.WriteHeader(httpStatus) - if _, err := r.Write([]byte(errorReason)); err != nil { - return err - } - return nil -} - -// Flush implements http.Flusher interface, which sends any buffered data to the client. -func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else if trace { - traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) - } -} - -// WriteHeader is overridden to remember the Status Code that has been written. -// Changes to the Header of the response have no effect after this. -func (r *Response) WriteHeader(httpStatus int) { - r.statusCode = httpStatus - r.ResponseWriter.WriteHeader(httpStatus) -} - -// StatusCode returns the code that has been written using WriteHeader. -func (r Response) StatusCode() int { - if 0 == r.statusCode { - // no status code has been written yet; assume OK - return http.StatusOK - } - return r.statusCode -} - -// Write writes the data to the connection as part of an HTTP reply. -// Write is part of http.ResponseWriter interface. -func (r *Response) Write(bytes []byte) (int, error) { - written, err := r.ResponseWriter.Write(bytes) - r.contentLength += written - return written, err -} - -// ContentLength returns the number of bytes written for the response content. -// Note that this value is only correct if all data is written through the Response using its Write* methods. -// Data written directly using the underlying http.ResponseWriter is not accounted for. 
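A brief sketch of how the error helpers above compose; NewError and ServiceError appear later in this diff (service_error.go), and the surrounding RouteFunction is implied:

    // inside a RouteFunction: write the status code and the ServiceError as the body
    resp.WriteServiceError(http.StatusNotFound,
        restful.NewError(http.StatusNotFound, "no such resource"))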
-func (r Response) ContentLength() int { - return r.contentLength -} - -// CloseNotify is part of http.CloseNotifier interface -func (r Response) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Error returns the err created by WriteError -func (r Response) Error() error { - return r.err -} diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go deleted file mode 100644 index f54e8622e3..0000000000 --- a/vendor/github.com/emicklei/go-restful/route.go +++ /dev/null @@ -1,183 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "net/http" - "strings" -) - -// RouteFunction declares the signature of a function that can be bound to a Route. -type RouteFunction func(*Request, *Response) - -// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. -type Route struct { - Method string - Produces []string - Consumes []string - Path string // webservice root path + described path - Function RouteFunction - Filters []FilterFunction - - // cached values for dispatching - relativePath string - pathParts []string - pathExpr *pathExpression // cached compilation of relativePath as RegExp - - // documentation - Doc string - Notes string - Operation string - ParameterDocs []*Parameter - ResponseErrors map[int]ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload -} - -// Initialize for Route -func (r *Route) postBuild() { - r.pathParts = tokenizePath(r.Path) -} - -// Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - params := r.extractParameters(httpRequest.URL.Path) - wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = params - wrappedRequest.selectedRoutePath = r.Path - wrappedResponse := NewResponse(httpWriter) - wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) - wrappedResponse.routeProduces = r.Produces - return wrappedRequest, wrappedResponse -} - -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - -// Return whether the mimeType matches to what this Route can produce. -func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - parts := strings.Split(mimeTypesWithQuality, ",") - for _, each := range parts { - var withoutQuality string - if strings.Contains(each, ";") { - withoutQuality = strings.Split(each, ";")[0] - } else { - withoutQuality = each - } - // trim before compare - withoutQuality = strings.Trim(withoutQuality, " ") - if withoutQuality == "*/*" { - return true - } - for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == withoutQuality { - return true - } - } - } - return false -} - -// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
-func (r Route) matchesContentType(mimeTypes string) bool { - - if len(r.Consumes) == 0 { - // did not specify what it can consume ; any media type (“*/*”) is assumed - return true - } - - if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or garanteed) empty content match missing Content-Type - m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true - } - // proceed with default - mimeTypes = MIME_OCTET - } - - parts := strings.Split(mimeTypes, ",") - for _, each := range parts { - var contentType string - if strings.Contains(each, ";") { - contentType = strings.Split(each, ";")[0] - } else { - contentType = each - } - // trim before compare - contentType = strings.Trim(contentType, " ") - for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == contentType { - return true - } - } - } - return false -} - -// Extract the parameters from the request url path -func (r Route) extractParameters(urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() -} - -// Tokenize an URL path using the slash separator ; the result does not have empty tokens -func tokenizePath(path string) []string { - if "/" == path { - return []string{} - } - return strings.Split(strings.Trim(path, "/"), "/") -} - -// for debugging -func (r Route) String() string { - return r.Method + " " + r.Path -} diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go deleted file mode 100644 index 8bc1ab6846..0000000000 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ /dev/null @@ -1,240 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "os" - "reflect" - "runtime" - "strings" - - "github.com/emicklei/go-restful/log" -) - -// RouteBuilder is a helper to construct Routes. -type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError -} - -// Do evaluates each argument with the RouteBuilder itself. -// This allows you to follow DRY principles without breaking the fluent programming style. 
-// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) -// -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } -func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { - for _, each := range oneArgBlocks { - each(b) - } - return b -} - -// To bind the route to a function. -// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. -func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { - b.function = function - return b -} - -// Method specifies what HTTP method to match. Required. -func (b *RouteBuilder) Method(method string) *RouteBuilder { - b.httpMethod = method - return b -} - -// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. -func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { - b.produces = mimeTypes - return b -} - -// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these -func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { - b.consumes = mimeTypes - return b -} - -// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". -func (b *RouteBuilder) Path(subPath string) *RouteBuilder { - b.currentPath = subPath - return b -} - -// Doc tells what this route is all about. Optional. -func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { - b.doc = documentation - return b -} - -// A verbose explanation of the operation behavior. Optional. -func (b *RouteBuilder) Notes(notes string) *RouteBuilder { - b.notes = notes - return b -} - -// Reads tells what resource type will be read from the request payload. Optional. -// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { - b.readSample = sample - typeAsName := reflect.TypeOf(sample).String() - bodyParameter := &Parameter{&ParameterData{Name: "body"}} - bodyParameter.beBody() - bodyParameter.Required(true) - bodyParameter.DataType(typeAsName) - b.Param(bodyParameter) - return b -} - -// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. -// Use this to modify or extend information for the Parameter (through its Data()). -func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { - for _, each := range b.parameters { - if each.Data().Name == name { - return each - } - } - return p -} - -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample - return b -} - -// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). -func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { - if b.parameters == nil { - b.parameters = []*Parameter{} - } - b.parameters = append(b.parameters, parameter) - return b -} - -// Operation allows you to document what the actual method/function call is of the Route. -// Unless called, the operation name is derived from the RouteFunction set using To(..). -func (b *RouteBuilder) Operation(name string) *RouteBuilder { - b.operation = name - return b -} - -// ReturnsError is deprecated, use Returns instead. 
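Echoing the Do example quoted above, a hypothetical wiring of Returns and Do on a RouteBuilder; ws, getUser, user and returns500 are illustrative names only:

    ws.Route(ws.GET("/{id}").To(getUser).
        Doc("fetch a user by its id").
        Param(ws.PathParameter("id", "identifier of the user")).
        Returns(200, "OK", user{}).
        Do(returns500))

    // returns500 documents a shared error response for many routes.
    func returns500(b *restful.RouteBuilder) {
        b.Returns(500, "Internal Server Error", restful.ServiceError{})
    }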
-func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { - log.Print("ReturnsError is deprecated, use Returns instead.") - return b.Returns(code, message, model) -} - -// Returns allows you to document what responses (errors or regular) can be expected. -// The model parameter is optional ; either pass a struct instance or use nil if not applicable. -func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { - err := ResponseError{ - Code: code, - Message: message, - Model: model, - } - // lazy init because there is no NewRouteBuilder (yet) - if b.errorMap == nil { - b.errorMap = map[int]ResponseError{} - } - b.errorMap[code] = err - return b -} - -type ResponseError struct { - Code int - Message string - Model interface{} -} - -func (b *RouteBuilder) servicePath(path string) *RouteBuilder { - b.rootPath = path - return b -} - -// Filter appends a FilterFunction to the end of filters for this Route to build. -func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { - b.filters = append(b.filters, filter) - return b -} - -// If no specific Route path then set to rootPath -// If no specific Produces then set to rootProduces -// If no specific Consumes then set to rootConsumes -func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { - if len(b.produces) == 0 { - b.produces = rootProduces - } - if len(b.consumes) == 0 { - b.consumes = rootConsumes - } -} - -// Build creates a new Route using the specification details collected by the RouteBuilder -func (b *RouteBuilder) Build() Route { - pathExpr, err := newPathExpression(b.currentPath) - if err != nil { - log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err) - os.Exit(1) - } - if b.function == nil { - log.Printf("[restful] No function specified for route:" + b.currentPath) - os.Exit(1) - } - operationName := b.operation - if len(operationName) == 0 && b.function != nil { - // extract from definition - operationName = nameOfFunction(b.function) - } - route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - ReadSample: b.readSample, - WriteSample: b.writeSample} - route.postBuild() - return route -} - -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") -} - -// nameOfFunction returns the short name of the function f for documentation. -// It uses a runtime feature for debugging ; its value may change for later Go versions. -func nameOfFunction(f interface{}) string { - fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) - tokenized := strings.Split(fun.Name(), ".") - last := tokenized[len(tokenized)-1] - last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 - last = strings.TrimSuffix(last, ")-fm") // Go 1.5 - last = strings.TrimSuffix(last, "·fm") // < Go 1.5 - last = strings.TrimSuffix(last, "-fm") // Go 1.5 - return last -} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go deleted file mode 100644 index 9b32fb6753..0000000000 --- a/vendor/github.com/emicklei/go-restful/router.go +++ /dev/null @@ -1,18 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. 
-// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "net/http" - -// A RouteSelector finds the best matching Route given the input HTTP Request -type RouteSelector interface { - - // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. - // It returns a selected Route and its containing WebService or an error indicating - // a problem. - SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) -} diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go deleted file mode 100644 index 62d1108bbd..0000000000 --- a/vendor/github.com/emicklei/go-restful/service_error.go +++ /dev/null @@ -1,23 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "fmt" - -// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. -type ServiceError struct { - Code int - Message string -} - -// NewError returns a ServiceError using the code and reason -func NewError(code int, message string) ServiceError { - return ServiceError{Code: code, Message: message} -} - -// Error returns a text representation of the service error -func (s ServiceError) Error() string { - return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go deleted file mode 100644 index 2a51004f80..0000000000 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ /dev/null @@ -1,268 +0,0 @@ -package restful - -import ( - "errors" - "os" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. -type WebService struct { - rootPath string - pathExpr *pathExpression // cached compilation of rootPath as RegExp - routes []Route - produces []string - consumes []string - pathParameters []*Parameter - filters []FilterFunction - documentation string - apiVersion string - - dynamicRoutes bool - - // protects 'routes' if dynamic routes are enabled - routesLock sync.RWMutex -} - -func (w *WebService) SetDynamicRoutes(enable bool) { - w.dynamicRoutes = enable -} - -// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. -func (w *WebService) compilePathExpression() { - compiled, err := newPathExpression(w.rootPath) - if err != nil { - log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) - os.Exit(1) - } - w.pathExpr = compiled -} - -// ApiVersion sets the API version for documentation purposes. -func (w *WebService) ApiVersion(apiVersion string) *WebService { - w.apiVersion = apiVersion - return w -} - -// Version returns the API version for documentation purposes. -func (w *WebService) Version() string { return w.apiVersion } - -// Path specifies the root URL template path of the WebService. -// All Routes will be relative to this path. 
-func (w *WebService) Path(root string) *WebService { - w.rootPath = root - if len(w.rootPath) == 0 { - w.rootPath = "/" - } - w.compilePathExpression() - return w -} - -// Param adds a PathParameter to document parameters used in the root path. -func (w *WebService) Param(parameter *Parameter) *WebService { - if w.pathParameters == nil { - w.pathParameters = []*Parameter{} - } - w.pathParameters = append(w.pathParameters, parameter) - return w -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) PathParameter(name, description string) *Parameter { - return PathParameter(name, description) -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func PathParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} - p.bePath() - return p -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) QueryParameter(name, description string) *Parameter { - return QueryParameter(name, description) -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beQuery() - return p -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func (w *WebService) BodyParameter(name, description string) *Parameter { - return BodyParameter(name, description) -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func BodyParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} - p.beBody() - return p -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) HeaderParameter(name, description string) *Parameter { - return HeaderParameter(name, description) -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func HeaderParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beHeader() - return p -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) FormParameter(name, description string) *Parameter { - return FormParameter(name, description) -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. 
-func FormParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beForm() - return p -} - -// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes. -func (w *WebService) Route(builder *RouteBuilder) *WebService { - w.routesLock.Lock() - defer w.routesLock.Unlock() - builder.copyDefaults(w.produces, w.consumes) - w.routes = append(w.routes, builder.Build()) - return w -} - -// RemoveRoute removes the route that matches the given 'path' and 'method' -func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current = current + 1 - } - w.routes = newRoutes - return nil -} - -// Method creates a new RouteBuilder and initializes its HTTP method -func (w *WebService) Method(httpMethod string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod) -} - -// Produces specifies that this WebService can produce one or more MIME types. -// Http requests must have one of these values set for the Accept header. -func (w *WebService) Produces(contentTypes ...string) *WebService { - w.produces = contentTypes - return w -} - -// Consumes specifies that this WebService can consume one or more MIME types. -// Http requests must have one of these values set for the Content-Type header. -func (w *WebService) Consumes(accepts ...string) *WebService { - w.consumes = accepts - return w -} - -// Routes returns the Routes associated with this WebService -func (w *WebService) Routes() []Route { - if !w.dynamicRoutes { - return w.routes - } - // Make a copy of the array to prevent concurrency problems - w.routesLock.RLock() - defer w.routesLock.RUnlock() - result := make([]Route, len(w.routes)) - for ix := range w.routes { - result[ix] = w.routes[ix] - } - return result -} - -// RootPath returns the RootPath associated with this WebService. Default "/" -func (w *WebService) RootPath() string { - return w.rootPath -} - -// PathParameters returns the path parameters shared among its Routes -func (w *WebService) PathParameters() []*Parameter { - return w.pathParameters -} - -// Filter adds a filter function to the chain of filters applicable to all its Routes -func (w *WebService) Filter(filter FilterFunction) *WebService { - w.filters = append(w.filters, filter) - return w -} - -// Doc is used to set the documentation of this service. -func (w *WebService) Doc(plainText string) *WebService { - w.documentation = plainText - return w -} - -// Documentation returns the documentation of this service.
-func (w *WebService) Documentation() string { - return w.documentation -} - -/* - Convenience methods -*/ - -// HEAD is a shortcut for .Method("HEAD").Path(subPath) -func (w *WebService) HEAD(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath) -} - -// GET is a shortcut for .Method("GET").Path(subPath) -func (w *WebService) GET(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath) -} - -// POST is a shortcut for .Method("POST").Path(subPath) -func (w *WebService) POST(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath) -} - -// PUT is a shortcut for .Method("PUT").Path(subPath) -func (w *WebService) PUT(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath) -} - -// PATCH is a shortcut for .Method("PATCH").Path(subPath) -func (w *WebService) PATCH(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath) -} - -// DELETE is a shortcut for .Method("DELETE").Path(subPath) -func (w *WebService) DELETE(subPath string) *RouteBuilder { - return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath) -} diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go deleted file mode 100644 index c9d31b06c4..0000000000 --- a/vendor/github.com/emicklei/go-restful/web_service_container.go +++ /dev/null @@ -1,39 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" -) - -// DefaultContainer is a restful.Container that uses http.DefaultServeMux -var DefaultContainer *Container - -func init() { - DefaultContainer = NewContainer() - DefaultContainer.ServeMux = http.DefaultServeMux -} - -// If set the true then panics will not be caught to return HTTP 500. -// In that case, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) -var DoNotRecover = false - -// Add registers a new WebService add it to the DefaultContainer. -func Add(service *WebService) { - DefaultContainer.Add(service) -} - -// Filter appends a container FilterFunction from the DefaultContainer. -// These are called before dispatching a http.Request to a WebService. 
-func Filter(filter FilterFunction) { - DefaultContainer.Filter(filter) -} - -// RegisteredWebServices returns the collections of WebServices from the DefaultContainer -func RegisteredWebServices() []*WebService { - return DefaultContainer.RegisteredWebServices() -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.drone.sec b/vendor/github.com/go-openapi/jsonpointer/.drone.sec deleted file mode 100644 index a1d7bbe076..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.pDqezepze0YqRx4u6M8GFaWtnVR-utTWZic-GX-RvMATAoYpG4H2sc9tlnGNCxa44dbRY0vY10qfBU7Sno8vkp21fsK42ofGLfen_suum_0ilm0sFS0X-kAwk7TIq5L5lPPKiChPMUiGp5oJW-g5MqMFX1jNiI-4fP-vSM3B3-eyZtJD_O517TgfIRLnblCzqwIkyRmAfPNopi-Fe8Y31TmO2Vd0nFc1Aqro_VaJSACzEVxOHTNpjETcMjlYzwgMXLeiAfLV-5hM0f6DXgHMlLSuMkB_Ndnw25dkB7hreGk4x0tHQ3X9mUfTgLq1hIDoyeeKDIM83Tqw4LBRph20BQ.qd_pNuyi23B0PlWz.JtpO7kqOm0SWOGzWDalkWheHuNd-eDpVbqI9WPAEFDOIBvz7TbsYMBlIYVWEGWbat4mkx_ejxnMn1L1l996NJnyP7eY-QE82cfPJbjx94d0Ob70KZ4DCm_UxcY2t-OKFiPJqxW7MA5jKyDuGD16bdxpjLEoe_cMSEr8FNu-MVG6wcchPcyYyRkqTQSl4mb09KikkAzHjwjo-DcO0f8ps4Uzsoc0aqAAWdE-ocG0YqierLoemjusYMiLH-eLF6MvaLRvHSte-cLzPuYCeZURnBDgxu3i3UApgddnX7g1c7tdGGBGvgCl-tEEDW58Vxgdjksim2S7y3lfoJ8FFzSWeRH2y7Kq04hgew3b2J_RiDB9ejzIopzG8ZGjJa3EO1-i9ORTl12nXK1RdlLGqu604ENaeVOPCIHL-0C8e6_wHdUGHydLZImSxKYSrNvy8resP1D_9t4B-3q2mkS9mhnMONrXbPDVw5QY5mvXlWs0Db99ARwzsl-Qlu0A_tsZwMjWT2I1QMvWPyTRScmMm0FJSv9zStjzxWa_q2GL7Naz1fI4Dd6ZgNJWYYq-mHN5chEeBdIcwb_zMPHczMQXXNL5nmfRGM1aPffkToFWCDpIlI8IXec83ZC6_POxZegS6n9Drrvc.6Nz8EXxs1lWX3ASaCeNElA \ No newline at end of file diff --git a/vendor/github.com/go-openapi/jsonpointer/.drone.yml b/vendor/github.com/go-openapi/jsonpointer/.drone.yml deleted file mode 100644 index cb8c7b50ad..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.drone.yml +++ /dev/null @@ -1,32 +0,0 @@ -clone: - path: github.com/go-openapi/jsonpointer - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/go-openapi/swag - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c244007..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml b/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml deleted file mode 100644 index 5ec183e224..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e3..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 9c9b1fd488..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index 39dd012c2a..0000000000 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package jsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (interface{}, error) } - -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer representation -type Pointer struct { - referenceTokens []string } - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } - } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - kind := reflect.Invalid - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind = rValue.Kind() - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - if mv.IsValid() && !swag.IsZero(mv) { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil,
kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node, kind = r, knd - - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) - return step2 -} diff --git a/vendor/github.com/go-openapi/jsonreference/.drone.sec b/vendor/github.com/go-openapi/jsonreference/.drone.sec deleted file mode 100644 index 5ff54fb9c9..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Xe40Wx6g5Y-iN0JVMhKyFfubtOId3zAVE564szw_yYGzFNhc_cGZO9F3BtAcJ55CfHG9C_ozn9dpnUDl_zYZoy_6cPCq13Ekb95z8NAC3ekDtbAATsc9HZwRNwI7UfkhstdwxljEouGB01qoLcUn6lFutrou-Ho21COHeDb2caemnPSA-rEAnXkOiBFu0RQ1MIwMygzvHXIHHYNpNwAtXqmiggM10miSjqBM3JmRPxCi7VK6_Rxij5p6LlhmK1BDi8Y6oBh-9BX3--5GAJeWZ6Vof5TnP-Enioia18j8c8KFtfY4q0y6Ednjb-AarLZ12gj695ppkBNJUdTJQmwGwA.fVcz_RiLrUB5fgMS.rjWllDYC6m_NB-ket_LizNEy9mlJ27odBTZQcMKaUqqXZBtWUCmPrOoMXGq-_cc-c7chg7D-WMh9SPQ23pV0P-DY-jsDpbOqHG2STOMEfW9ZREoaOLJXQaWcuBldLjRyWFcq0HGj97LgE6szD1Zlou3bmdHS_Q-U9Up9YQ_8_YnDcESD_cj1w5FZom7HjchKJFeGjQjfDQpoCKCQNMJaavUqy9jHQEeQ_uVocSrETg3GpewDcUF2tuv8uGq7ZZWu7Vl8zmnY1MFTynaGBWzTCSRmCkAXjcsaUheDP_NT5D7k-xUS6LwtqEUiXAXV07SNFraorFj5lnBQZRDlZMYcA3NWR6zHiOxekR9LBYPofst6w1rIqUchj_5m1tDpVTBMPir1eAaFcnJtPgo4ch17OF-kmcmQGLhJI3U7n8wv4sTrmP1dewtRRKrvlJe5r3_6eDiK4xZ8K0rnK1D4g6zuQqU1gA8KaU7pmZkKpFx3Bew4v-6DH32YwQBvAI7Lbb8afou9WsCNB_iswz5XGimP4bifiJRwpWBEz9VGhZFdiw-hZpYWgbxzVb5gtqfTDLIvpbLDmFz1vge16uUQHHVFpo1pSozyr7A60X8qsh9pmmO3RcJ-ZGZBWqiRC-Kl5ejz7WQ.LFoK4Ibi11B2lWQ5WcPSag \ No newline at end of file diff --git a/vendor/github.com/go-openapi/jsonreference/.drone.yml 
b/vendor/github.com/go-openapi/jsonreference/.drone.yml deleted file mode 100644 index 157ffe5798..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/.drone.yml +++ /dev/null @@ -1,33 +0,0 @@ -clone: - path: github.com/go-openapi/jsonreference - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/PuerkitoBio/purell - - go get -u github.com/go-openapi/jsonpointer - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore deleted file mode 100644 index 769c244007..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml b/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml deleted file mode 100644 index 5ec183e224..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e3..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index 5f7881274e..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonreference [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonreference/status.svg)](https://ci.vmware.run/go-openapi/jsonreference) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonreference/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) -An implementation of JSON Reference - Go language - -## Status -Work in progress ( 90% done ) - -## Dependencies -https://github.com/xeipuuv/gojsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index 3bc0a6e26f..0000000000 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/PuerkitoBio/purell" - "github.com/go-openapi/jsonpointer" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. -// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/vendor/github.com/go-openapi/spec/.drone.sec b/vendor/github.com/go-openapi/spec/.drone.sec deleted file mode 100644 index 60c5ebe38c..0000000000 --- a/vendor/github.com/go-openapi/spec/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Epk8dDFH8U1RPYIPDpajZO26L5zFJ1wnQNGWxVHHo5cXrWF148kENoZzh35FT9cAxxPS_4CeVVpf59EgvCc8bem1puuj0gBZptn-lYa7iXZdI-ESN2Te7nF5VbZfwbnI62nEikYGyxz-ozL_IFuMl-qWek4iLerF8Z_xh0MZOJ_w8Nog7qb2WQov72d997TJv5ZKjWcRYPbnsAy1q60-Cqxq3a6enhcSPXqpK46nYSXGKfHvognWBJ_pxwkEqIBPN6hE4EfNtJjMf2LFKEdYy02nbHz78d-2YZ8wIUSJ-IWIwn3GTzObdGqRed20Qf3JtWTsOespmexDrLSeo3HW6A.7XaHW-Y1jjRAWt_W.S1Adut62RLOYZc-lN02M0MGczEucch3zIr4J1UPBPnZooWzntiE5UaUz0UdhjHVszQE5hTfG-yocKD1rDQGER6qrLtnJVrCm9J3n4lHglM-xOz1eZln1XKrWcAgZnAKaKSzuAa5scPG4iTHW6RwbWi_PWm04tBJ1yazdjaVo3uvuhflwvU9if7uMPMtscrDesbBVvpG89xmeudiFjX-wjsV5oGBIjz6ukEBAMKzNDMqikNoG4SnGenpxUpjUjMkDXxiC3BC8oL2_myeIfFeEOF066DqEN3CLkqBVO25zdpWAF4Ou2jKv--mgGEb_E1aMgiSoAVBnybene0TKn2IJ8rtkyRdmWlLIRKZdDT3v775C1FPK6-tYzS7NVg9nnuvpta5PhzYNkqI1Ie74Sl0I-RFClhsdx9dLDhoFEKCx2etC4UDX9jhj2u0Y2MrL76dRGE9kEV1hL1fh6HMvS4ZAAWw3Qce4skCjcL-2YyIOHzKjgLGkZsR5cTUQwCJyacVkdHUOUKFdDGZaUzWkFyeZ1oyrlG2d52svaplpU5-vCOVbWkqUN9rOALGPTC51Ur0L7DFx29aDImhaxZqTe2t9mcdqY7VLcO3JgUiD3JKsEet7s2EDeN44MqITv9KBS8wqJW4.sRv4ov0wB0IxTHw90kJy-A \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/.drone.yml b/vendor/github.com/go-openapi/spec/.drone.yml deleted file mode 100644 index 6d04427372..0000000000 --- a/vendor/github.com/go-openapi/spec/.drone.yml +++ /dev/null @@ -1,35 +0,0 @@ -clone: - path: github.com/go-openapi/spec - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/jsonpointer - - go get -u github.com/go-openapi/jsonreference - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6a04..0000000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/spec/.pullapprove.yml b/vendor/github.com/go-openapi/spec/.pullapprove.yml deleted file mode 100644 index 5ec183e224..0000000000 --- a/vendor/github.com/go-openapi/spec/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e3..0000000000 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
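Note: the go-openapi/jsonpointer package removed in this diff implements RFC 6901 token escaping, documented in its source as `~0 => ~` and `~1 => /` and exposed through its `Escape` and `Unescape` helpers. Below is a minimal, self-contained sketch of that escaping round trip using only the Go standard library; the function names here are illustrative, not the vendored API.

```go
package main

import (
	"fmt"
	"strings"
)

// escapeToken applies RFC 6901 escaping. "~" must be rewritten first:
// if "/" were replaced first, the "~" introduced by "~1" would itself
// be escaped afterwards, corrupting the token.
func escapeToken(token string) string {
	token = strings.Replace(token, "~", "~0", -1)
	return strings.Replace(token, "/", "~1", -1)
}

// unescapeToken reverses the mapping. "~1" is decoded before "~0" so
// that an encoded "~01" round-trips back to the literal "~1" rather
// than being turned into "/".
func unescapeToken(token string) string {
	token = strings.Replace(token, "~1", "/", -1)
	return strings.Replace(token, "~0", "~", -1)
}

func main() {
	raw := "a/b~c"
	esc := escapeToken(raw)                     // "a~1b~0c"
	fmt.Println(esc, unescapeToken(esc) == raw) // a~1b~0c true
}
```

The replacement order matters in both directions, which is why the sketch mirrors the order used by the deleted pointer.go.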
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 4b2af124ac..0000000000 --- a/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index 54b98e612b..0000000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,260 +0,0 @@ -// Code generated by go-bindata. -// sources: -// schemas/jsonschema-draft-04.json -// schemas/v2/schema.json -// DO NOT EDIT! - -package spec - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04JSON, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04JSON() (*asset, error) { - bytes, err := jsonschemaDraft04JSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1460872076, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff … \xff\xff\x3d\xeb\x17\xb5\x38\x9d\x00\x00") [bulk of the gzip-compressed go-bindata payload for "v2/schema.json" elided; decompressed size 40248 bytes per the bindataFileInfo below] - -func v2SchemaJSONBytes() ([]byte, error) { - return bindataRead( - 
_v2SchemaJSON, - "v2/schema.json", - ) -} - -func v2SchemaJSON() (*asset, error) { - bytes, err := v2SchemaJSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(420), modTime: time.Unix(1473777784, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, - "v2": &bintree{nil, map[string]*bintree{ - "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} - diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index f285970aa1..0000000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ContactInfo contact information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index eb1490b055..0000000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,626 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strings" - "sync" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.Mutex - store map[string]interface{} -} - -var resCache = initResolutionCache() - -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -func (s *simpleCache) Get(uri string) (interface{}, bool) { - s.lock.Lock() - v, ok := s.store[uri] - s.lock.Unlock() - return v, ok -} - -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -// ResolveRef resolves a reference against a context root -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Schema) - if err := resolver.Resolve(ref, result); err != nil { - return nil, err - } - return result, nil -} - -// ResolveParameter resolves a paramter reference against a context root -func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Parameter) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - -// ResolveResponse resolves response a reference against a context root -func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) - if err != nil { - return nil, err - } - - result := new(Response) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - -type schemaLoader struct { - loadingRef *Ref - startingRef *Ref - currentRef *Ref - root interface{} - cache ResolutionCache - loadDoc func(string) (json.RawMessage, error) -} - -var idPtr, _ = jsonpointer.New("/id") -var schemaPtr, _ = jsonpointer.New("/$schema") -var refPtr, _ = jsonpointer.New("/$ref") - -func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { - if cache == nil { - cache = 
resCache - } - - var ptr *jsonpointer.Pointer - if ref != nil { - ptr = ref.GetPointer() - } - - currentRef := nextRef(root, ref, ptr) - - return &schemaLoader{ - root: root, - loadingRef: ref, - startingRef: ref, - cache: cache, - loadDoc: func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - }, - currentRef: currentRef, - }, nil -} - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - rf, _ := NewRef(refRef.(string)) - nw, err := ret.Inherits(rf) - if err != nil { - break - } - ret = nw - } - - } - return ret -} - -func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - oldRef := currentRef - if currentRef != nil { - var err error - currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) - if err != nil { - return err - } - } - if currentRef == nil { - currentRef = ref - } - - refURL := currentRef.GetURL() - if refURL == nil { - return nil - } - if currentRef.IsRoot() { - nv := reflect.ValueOf(node) - reflect.Indirect(tgt).Set(reflect.Indirect(nv)) - return nil - } - - if strings.HasPrefix(refURL.String(), "#") { - res, _, err := ref.GetPointer().Get(node) - if err != nil { - res, _, err = ref.GetPointer().Get(r.root) - if err != nil { - return err - } - } - rv := reflect.Indirect(reflect.ValueOf(res)) - tgtType := reflect.Indirect(tgt).Type() - if rv.Type().AssignableTo(tgtType) { - reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res))) - } else { - if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil { - return err - } - } - - return nil - } - - if refURL.Scheme != "" && refURL.Host != "" { - // most definitely take the red pill - data, _, _, err := r.load(refURL) - if err != nil { - return err - } - - if ((oldRef == nil && currentRef != nil) || - (oldRef != nil && currentRef == nil) || - oldRef.String() != currentRef.String()) && - ((oldRef == nil && ref != nil) || - (oldRef != nil && ref == nil) || - (oldRef.String() != ref.String())) { - - return r.resolveRef(currentRef, ref, data, target) - } - - var res interface{} - if currentRef.String() != "" { - res, _, err = currentRef.GetPointer().Get(data) - if err != nil { - return err - } - } else { - res = data - } - - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err - } - - } - return nil -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - toFetch := *refURL - toFetch.Fragment = "" - - data, fromCache := r.cache.Get(toFetch.String()) - if !fromCache { - b, 
err := r.loadDoc(toFetch.String()) - if err != nil { - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(toFetch.String(), data) - } - - return data, toFetch, fromCache, nil -} -func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { - if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { - return err - } - - return nil -} - -type specExpander struct { - spec *Swagger - resolver *schemaLoader -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger) error { - resolver, err := defaultSchemaLoader(spec, nil, nil) - if err != nil { - return err - } - - for key, defintition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { - return err - } - spec.Definitions[key] = *def - } - - for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver); err != nil { - return err - } - spec.Parameters[key] = parameter - } - - for key, response := range spec.Responses { - if err := expandResponse(&response, resolver); err != nil { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver); err != nil { - return err - } - spec.Paths.Paths[key] = path - } - } - - return nil -} - -// ExpandSchema expands the refs in the schema object -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - - if schema == nil { - return nil - } - if root == nil { - root = schema - } - - nrr, _ := NewRef(schema.ID) - var rrr *Ref - if nrr.String() != "" { - switch root.(type) { - case *Schema: - rid, _ := NewRef(root.(*Schema).ID) - rrr, _ = rid.Inherits(nrr) - case *Swagger: - rid, _ := NewRef(root.(*Swagger).ID) - rrr, _ = rid.Inherits(nrr) - } - - } - - resolver, err := defaultSchemaLoader(root, rrr, cache) - if err != nil { - return err - } - - refs := []string{""} - if rrr != nil { - refs[0] = rrr.String() - } - var s *Schema - if s, err = expandSchema(*schema, refs, resolver); err != nil { - return nil - } - *schema = *s - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { - if target.Items != nil { - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) - if err != nil { - return nil, err - } - *target.Items.Schema = *t - } - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - } - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { - defer func() { - schema = &target - }() - if target.Ref.String() == "" && target.Ref.IsRoot() { - target = *resolver.root.(*Schema) - return - } - - // t is the new expanded schema - var t *Schema - for target.Ref.String() != "" { - // var newTarget Schema - pRefs := strings.Join(parentRefs, ",") - pRefs += "," - if strings.Contains(pRefs, target.Ref.String()+",") { - err = nil - return - } - - if err = resolver.Resolve(&target.Ref, &t); err != nil { - return - } - parentRefs = append(parentRefs, target.Ref.String()) - target = *t - } - - if t, err = expandItems(target, parentRefs, resolver); err != nil { - return - } - target = *t - - for i 
:= range target.AllOf { - if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { - return - } - target.AllOf[i] = *t - } - for i := range target.AnyOf { - if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { - return - } - target.AnyOf[i] = *t - } - for i := range target.OneOf { - if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { - return - } - target.OneOf[i] = *t - } - if target.Not != nil { - if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { - return - } - *target.Not = *t - } - for k, _ := range target.Properties { - if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { - return - } - target.Properties[k] = *t - } - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { - return - } - *target.AdditionalProperties.Schema = *t - } - for k, _ := range target.PatternProperties { - if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { - return - } - target.PatternProperties[k] = *t - } - for k, _ := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { - return - } - *target.Dependencies[k].Schema = *t - } - } - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { - return - } - *target.AdditionalItems.Schema = *t - } - for k, _ := range target.Definitions { - if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { - return - } - target.Definitions[k] = *t - } - return -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { - if pathItem == nil { - return nil - } - if pathItem.Ref.String() != "" { - if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { - return err - } - } - - for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { - return err - } - } - if err := expandOperation(pathItem.Get, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Head, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Options, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Put, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Post, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Patch, resolver); err != nil { - return err - } - if err := expandOperation(pathItem.Delete, resolver); err != nil { - return err - } - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader) error { - if op == nil { - return nil - } - for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver); err != nil { - return err - } - op.Parameters[i] = param - } - - if op.Responses != nil { - responses := op.Responses - if err := expandResponse(responses.Default, resolver); err != nil { - return err - } - for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver); err != nil { - return err - } - responses.StatusCodeResponses[code] = response - } - } - return nil -} - -func expandResponse(response *Response, resolver *schemaLoader) error { - if response == nil { - return 
nil - } - - if response.Ref.String() != "" { - if err := resolver.Resolve(&response.Ref, response); err != nil { - return err - } - } - - if response.Schema != nil { - parentRefs := []string{response.Schema.Ref.String()} - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { - return err - } - if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { - return err - } else { - *response.Schema = *s - } - } - return nil -} - -func expandParameter(parameter *Parameter, resolver *schemaLoader) error { - if parameter == nil { - return nil - } - if parameter.Ref.String() != "" { - if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { - return err - } - } - if parameter.Schema != nil { - parentRefs := []string{parameter.Schema.Ref.String()} - if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { - return err - } - if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { - return err - } else { - *parameter.Schema = *s - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b2b..0000000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 758b845310..0000000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = "array" - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON marshal this from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.HeaderProps); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index fb8b7c4ac5..0000000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, ok := v.([]interface{}) - if !ok { - return nil, false - } - var strs []string - for _, iface := range arr { - str, ok := iface.(string) - if !ok { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. 
-type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. -// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index 4d57ea5ca6..0000000000 --- a/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` -} - -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". -// -// For more information: http://goo.gl/8us55a#items-object- -type Items struct { - Refable - CommonValidations - SimpleSchema -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = "array" - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) 
- return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index f20961b4fd..0000000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index de1db6f020..0000000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// SuccessResponse gets a success response model -func (o *Operation) SuccessResponse() (*Response, int, bool) { - if o.Responses == nil { - return nil, 0, false - } - - for k, v := range o.Responses.StatusCodeResponses { - if k/100 == 2 { - return &v, k, true - } - } - - return o.Responses.Default, 0, false -} - -// JSONLookup look up a value by the json property name -func (o Operation) JSONLookup(token string) (interface{}, error) { - if ex, ok := o.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(o.OperationProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// NewOperation creates a new operation instance. -// It expects an ID as parameter but not passing an ID is also valid. -func NewOperation(id string) *Operation { - op := new(Operation) - op.ID = id - return op -} - -// WithID sets the ID property on this operation, allows for chaining. -func (o *Operation) WithID(id string) *Operation { - o.ID = id - return o -} - -// WithDescription sets the description on this operation, allows for chaining -func (o *Operation) WithDescription(description string) *Operation { - o.Description = description - return o -} - -// WithSummary sets the summary on this operation, allows for chaining -func (o *Operation) WithSummary(summary string) *Operation { - o.Summary = summary - return o -} - -// WithExternalDocs sets/removes the external docs for/from this operation. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (o *Operation) WithExternalDocs(description, url string) *Operation { - if description == "" && url == "" { - o.ExternalDocs = nil - return o - } - - if o.ExternalDocs == nil { - o.ExternalDocs = &ExternalDocumentation{} - } - o.ExternalDocs.Description = description - o.ExternalDocs.URL = url - return o -} - -// Deprecate marks the operation as deprecated -func (o *Operation) Deprecate() *Operation { - o.Deprecated = true - return o -} - -// Undeprecate marks the operation as not deprected -func (o *Operation) Undeprecate() *Operation { - o.Deprecated = false - return o -} - -// WithConsumes adds media types for incoming body values -func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { - o.Consumes = append(o.Consumes, mediaTypes...) - return o -} - -// WithProduces adds media types for outgoing body values -func (o *Operation) WithProduces(mediaTypes ...string) *Operation { - o.Produces = append(o.Produces, mediaTypes...) - return o -} - -// WithTags adds tags for this operation -func (o *Operation) WithTags(tags ...string) *Operation { - o.Tags = append(o.Tags, tags...) - return o -} - -// AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced -func (o *Operation) AddParam(param *Parameter) *Operation { - if param == nil { - return o - } - - for i, p := range o.Parameters { - if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) - params = append(params, o.Parameters[i+1:]...) - o.Parameters = params - return o - } - } - - o.Parameters = append(o.Parameters, *param) - return o -} - -// RemoveParam removes a parameter from the operation -func (o *Operation) RemoveParam(name, in string) *Operation { - for i, p := range o.Parameters { - if p.Name == name && p.In == name { - o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) - return o - } - } - return o -} - -// SecuredWith adds a security scope to this operation. -func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { - o.Security = append(o.Security, map[string][]string{name: scopes}) - return o -} - -// WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. -// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index 8fb66d12a5..0000000000 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. -// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false 
- return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 9ab3ec5383..0000000000 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except 
in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// pathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a2901..0000000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
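For orientation, the fluent Operation and PathItem API removed just above composes as in the following minimal sketch. It is an illustration only: it assumes the upstream github.com/go-openapi/spec import path (the package being dropped from vendor/), and the operation id, tag and parameter names ("items", "itemId") are made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a GET operation with the fluent helpers from the deleted operation.go.
	op := new(spec.Operation).
		WithTags("items").
		WithProduces("application/json")
	op.AddParam(spec.PathParam("itemId").Typed("string", ""))
	op.RespondsWith(200, spec.NewResponse().WithDescription("the requested item"))
	op.WithDefaultResponse(spec.NewResponse().WithDescription("unexpected error"))

	// Attach the operation to a PathItem; its custom MarshalJSON flattens the props.
	pi := spec.PathItem{PathItemProps: spec.PathItemProps{Get: op}}
	b, err := json.MarshalIndent(pi, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}

Note that WithDefaultResponse simply delegates to RespondsWith(0, ...), so passing a nil response through either entry point removes the corresponding entry.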
- -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index 68631df8b4..0000000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
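The Paths type deleted above relies on its custom (un)marshalers to flatten the path map: on unmarshal, keys beginning with "/" become PathItem entries, "x-" keys become vendor extensions, and any other key is silently dropped. A small sketch of that behavior, under the same assumptions as before (upstream import path, made-up payload):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// "/items" should land in Paths, "x-origin" in Extensions; "ignored" is dropped.
	raw := []byte(`{"/items":{"get":{"summary":"list items"}},"x-origin":"generated","ignored":true}`)

	var paths spec.Paths
	if err := json.Unmarshal(raw, &paths); err != nil {
		panic(err)
	}

	fmt.Println(len(paths.Paths))             // 1
	fmt.Println(paths.Extensions["x-origin"]) // generated
}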
-
-package spec
-
-import (
-	"encoding/json"
-	"net/http"
-	"os"
-	"path/filepath"
-
-	"github.com/go-openapi/jsonreference"
-)
-
-// Refable is a struct for things that accept a $ref property
-type Refable struct {
-	Ref Ref
-}
-
-// MarshalJSON marshals the ref to json
-func (r Refable) MarshalJSON() ([]byte, error) {
-	return r.Ref.MarshalJSON()
-}
-
-// UnmarshalJSON unmarshals the ref from json
-func (r *Refable) UnmarshalJSON(d []byte) error {
-	return json.Unmarshal(d, &r.Ref)
-}
-
-// Ref represents a json reference that is potentially resolved
-type Ref struct {
-	jsonreference.Ref
-}
-
-// RemoteURI gets the remote uri part of the ref
-func (r *Ref) RemoteURI() string {
-	if r.String() == "" {
-		return r.String()
-	}
-
-	u := *r.GetURL()
-	u.Fragment = ""
-	return u.String()
-}
-
-// IsValidURI returns true when the url the ref points to can be found
-func (r *Ref) IsValidURI() bool {
-	if r.String() == "" {
-		return true
-	}
-
-	v := r.RemoteURI()
-	if v == "" {
-		return true
-	}
-
-	if r.HasFullURL {
-		rr, err := http.Get(v)
-		if err != nil {
-			return false
-		}
-
-		return rr.StatusCode/100 == 2
-	}
-
-	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
-		return false
-	}
-
-	// check for local file
-	pth := v
-	if r.HasURLPathOnly {
-		p, e := filepath.Abs(pth)
-		if e != nil {
-			return false
-		}
-		pth = p
-	}
-
-	fi, err := os.Stat(pth)
-	if err != nil {
-		return false
-	}
-
-	return !fi.IsDir()
-}
-
-// Inherits creates a new reference from a parent and a child.
-// If the child cannot inherit from the parent, an error is returned
-func (r *Ref) Inherits(child Ref) (*Ref, error) {
-	ref, err := r.Ref.Inherits(child.Ref)
-	if err != nil {
-		return nil, err
-	}
-	return &Ref{Ref: *ref}, nil
-}
-
-// NewRef creates a new instance of a ref object.
-// It returns an error when the reference uri is invalid
-func NewRef(refURI string) (Ref, error) {
-	ref, err := jsonreference.New(refURI)
-	if err != nil {
-		return Ref{}, err
-	}
-	return Ref{Ref: ref}, nil
-}
-
-// MustCreateRef creates a ref object but panics when the reference uri is invalid
-func MustCreateRef(refURI string) Ref {
-	return Ref{Ref: jsonreference.MustCreateRef(refURI)}
-}
-
-// // NewResolvedRef creates a resolved ref
-// func NewResolvedRef(refURI string, data interface{}) Ref {
-// 	return Ref{
-// 		Ref:      jsonreference.MustCreateRef(refURI),
-// 		Resolved: data,
-// 	}
-// }
-
-// MarshalJSON marshals this ref into a JSON object
-func (r Ref) MarshalJSON() ([]byte, error) {
-	str := r.String()
-	if str == "" {
-		if r.IsRoot() {
-			return []byte(`{"$ref":"#"}`), nil
-		}
-		return []byte("{}"), nil
-	}
-	v := map[string]interface{}{"$ref": str}
-	return json.Marshal(v)
-}
-
-// UnmarshalJSON unmarshals this ref from a JSON object
-func (r *Ref) UnmarshalJSON(d []byte) error {
-	var v map[string]interface{}
-	if err := json.Unmarshal(d, &v); err != nil {
-		return err
-	}
-
-	if v == nil {
-		return nil
-	}
-
-	if vv, ok := v["$ref"]; ok {
-		if str, ok := vv.(string); ok {
-			ref, err := jsonreference.New(str)
-			if err != nil {
-				return err
-			}
-			*r = Ref{Ref: ref}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
deleted file mode 100644
index 308cc8478f..0000000000
--- a/vendor/github.com/go-openapi/spec/response.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. -// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index ea071ca63d..0000000000 --- a/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spec
-
-import (
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strconv"
-
-	"github.com/go-openapi/swag"
-)
-
-// Responses is a container for the expected responses of an operation.
-// The container maps an HTTP response code to the expected response.
-// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
-// since they may not be known in advance. However, it is expected from the documentation to cover
-// a successful operation response and any known errors.
-//
-// The `default` can be used as a default response object for all HTTP codes that are not covered
-// individually by the specification.
-//
-// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
-// for a successful operation call.
-//
-// For more information: http://goo.gl/8us55a#responsesObject
-type Responses struct {
-	VendorExtensible
-	ResponsesProps
-}
-
-// JSONLookup implements an interface to customize json pointer lookup
-func (r Responses) JSONLookup(token string) (interface{}, error) {
-	if token == "default" {
-		return r.Default, nil
-	}
-	if ex, ok := r.Extensions[token]; ok {
-		return &ex, nil
-	}
-	if i, err := strconv.Atoi(token); err == nil {
-		if scr, ok := r.StatusCodeResponses[i]; ok {
-			return &scr, nil
-		}
-	}
-	return nil, fmt.Errorf("object has no field %q", token)
-}
-
-// UnmarshalJSON hydrates this responses instance with the data from JSON
-func (r *Responses) UnmarshalJSON(data []byte) error {
-	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
-		return err
-	}
-	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
-		return err
-	}
-	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
-		r.ResponsesProps = ResponsesProps{}
-	}
-	return nil
-}
-
-// MarshalJSON converts this responses object to JSON
-func (r Responses) MarshalJSON() ([]byte, error) {
-	b1, err := json.Marshal(r.ResponsesProps)
-	if err != nil {
-		return nil, err
-	}
-	b2, err := json.Marshal(r.VendorExtensible)
-	if err != nil {
-		return nil, err
-	}
-	concated := swag.ConcatJSON(b1, b2)
-	return concated, nil
-}
-
-type ResponsesProps struct {
-	Default             *Response
-	StatusCodeResponses map[int]Response
-}
-
-func (r ResponsesProps) MarshalJSON() ([]byte, error) {
-	toser := map[string]Response{}
-	if r.Default != nil {
-		toser["default"] = *r.Default
-	}
-	for k, v := range r.StatusCodeResponses {
-		toser[strconv.Itoa(k)] = v
-	}
-	return json.Marshal(toser)
-}
-
-func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
-	var res map[string]Response
-	if err := json.Unmarshal(data, &res); err != nil {
-		return err
-	}
-	if v, ok := res["default"]; ok {
-		r.Default = &v
-		delete(res, "default")
-	}
-	for k, v := range res {
-		if nk, err := strconv.Atoi(k); err == nil {
-			if r.StatusCodeResponses == nil {
-				r.StatusCodeResponses = map[int]Response{}
-			}
-			r.StatusCodeResponses[nk] = v
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
deleted file mode 100644
index eb88f005c5..0000000000
---
a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func 
ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-,omitempty"` - Schema SchemaURL `json:"-,omitempty"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf 
[]Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties map[string]Schema `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties map[string]Schema `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. -// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || err != nil { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = 
format
-	}
-	return s
-}
-
-// CollectionOf is a fluent builder method for an array schema
-func (s *Schema) CollectionOf(items Schema) *Schema {
-	s.Type = []string{"array"}
-	s.Items = &SchemaOrArray{Schema: &items}
-	return s
-}
-
-// WithDefault sets the default value on this schema
-func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
-	s.Default = defaultValue
-	return s
-}
-
-// WithRequired sets the required property names on this schema (replace)
-func (s *Schema) WithRequired(items ...string) *Schema {
-	s.Required = items
-	return s
-}
-
-// AddRequired adds field names to the required properties array
-func (s *Schema) AddRequired(items ...string) *Schema {
-	s.Required = append(s.Required, items...)
-	return s
-}
-
-// WithMaxLength sets a max length value
-func (s *Schema) WithMaxLength(max int64) *Schema {
-	s.MaxLength = &max
-	return s
-}
-
-// WithMinLength sets a min length value
-func (s *Schema) WithMinLength(min int64) *Schema {
-	s.MinLength = &min
-	return s
-}
-
-// WithPattern sets a pattern value
-func (s *Schema) WithPattern(pattern string) *Schema {
-	s.Pattern = pattern
-	return s
-}
-
-// WithMultipleOf sets a multiple of value
-func (s *Schema) WithMultipleOf(number float64) *Schema {
-	s.MultipleOf = &number
-	return s
-}
-
-// WithMaximum sets a maximum number value
-func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
-	s.Maximum = &max
-	s.ExclusiveMaximum = exclusive
-	return s
-}
-
-// WithMinimum sets a minimum number value
-func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
-	s.Minimum = &min
-	s.ExclusiveMinimum = exclusive
-	return s
-}
-
-// WithEnum sets the enum values (replace)
-func (s *Schema) WithEnum(values ...interface{}) *Schema {
-	s.Enum = append([]interface{}{}, values...)
-	return s
-}
-
-// WithMaxItems sets the max items
-func (s *Schema) WithMaxItems(size int64) *Schema {
-	s.MaxItems = &size
-	return s
-}
-
-// WithMinItems sets the min items
-func (s *Schema) WithMinItems(size int64) *Schema {
-	s.MinItems = &size
-	return s
-}
-
-// UniqueValues dictates that this array can only have unique items
-func (s *Schema) UniqueValues() *Schema {
-	s.UniqueItems = true
-	return s
-}
-
-// AllowDuplicates dictates that this array can have duplicates
-func (s *Schema) AllowDuplicates() *Schema {
-	s.UniqueItems = false
-	return s
-}
-
-// AddToAllOf adds a schema to the allOf property
-func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
-	s.AllOf = append(s.AllOf, schemas...)
-	return s
-}
-
-// WithDiscriminator sets the name of the discriminator field
-func (s *Schema) WithDiscriminator(discriminator string) *Schema {
-	s.Discriminator = discriminator
-	return s
-}
-
-// AsReadOnly flags this schema as readonly
-func (s *Schema) AsReadOnly() *Schema {
-	s.ReadOnly = true
-	return s
-}
-
-// AsWritable flags this schema as writable (not read-only)
-func (s *Schema) AsWritable() *Schema {
-	s.ReadOnly = false
-	return s
-}
-
-// WithExample sets the example for this schema
-func (s *Schema) WithExample(example interface{}) *Schema {
-	s.Example = example
-	return s
-}
-
-// WithExternalDocs sets/removes the external docs for/from this schema.
-// When you pass empty strings as params the external documents will be removed.
-// When you pass non-empty string as one value then those values will be used on the external docs object.
-// So when you pass a non-empty description, you should also pass the url and vice versa.
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - var sch Schema - if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.Ref); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.Schema); err != nil { - return err - } - if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil { - return err - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - 
- return nil -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index 22d4f10af2..0000000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. 
-// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). -// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index cc2ae56b2b..0000000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import "encoding/json" - -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -var ( - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() -) - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index ff3ef875ed..0000000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required - Definitions Definitions `json:"definitions"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, 
error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return nil, nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch single.(type) { - case string: - *s = StringOrArray([]string{single.(string)}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index 97f555840c..0000000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a46703d..0000000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. 
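Tag's MarshalJSON above illustrates the package's recurring two-struct pattern: the fixed fields (TagProps) and the vendor extensions (VendorExtensible) are marshalled separately and spliced back into one object with swag.ConcatJSON, while UnmarshalJSON simply decodes the same bytes into both halves. A rough sketch with hypothetical stand-in types:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/swag"
)

// props and extensions stand in for TagProps and VendorExtensible;
// each half marshals to a complete JSON object on its own.
type props struct {
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
}

type extensions map[string]interface{}

func main() {
	b1, _ := json.Marshal(props{Name: "pets", Description: "pet operations"})
	b2, _ := json.Marshal(extensions{"x-team": "api"})
	// ConcatJSON strips the inner braces and joins with a comma:
	// {"name":"pets","description":"pet operations","x-team":"api"}
	fmt.Println(string(swag.ConcatJSON(b1, b2)))
}
```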
-// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/swag/.drone.sec b/vendor/github.com/go-openapi/swag/.drone.sec deleted file mode 100644 index 9cc7e64a2c..0000000000 --- a/vendor/github.com/go-openapi/swag/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.darMHuSYnLhsknrnqjdXLGcyEJ5kM13yQbLGi2UWcKXpddHvPT3dMCnP5y7e27c76R2HFnvr56BqDMI-x0zyuQtjYKzSFUGjOcEhOH3OL1Hxu-Cm2z2443BS8asXNA3sEveAGQvG68jffm7CvtEtMo57wBpggI7UHbfIdLMK47s1EDpC4xanjiS1BJe5NC_Ikf0jfa6vf18oggbjxuoqSEvSNVdNRyZwG_npFaZFJzvdtehTG7GqunWjGiqBb81qNcEdzSdIZW7A_Esv4U-nOL5gGr55E9jKhv3bX4Z9ygGcSrJ3NCgR_3zRhYKkPEAOXIqQKfL6-h82BY--cHq9uw.NHL3X-1tjb8a8zF7.eRmLvOG32e7260K8rkI-HmUGG5Gb6Hu-obKKxBqHd-vVzsKnwTVJavLWktPqlXGMsnDt7MimSysNqsWemMUEviW2p3godnvjOOXTDb-RAtQ-39rvxnZ2bN8qwUVFrdiKTZD06l60yTeLW7L1psyLj50NxklFObhkpUcK5uukxLXT1SzGM9aY6_3dzW4HU9pZGQrIH1pj1UzvWIjz7iIzE1a37DHBN-FiYSASsw01v1SSIFr34gwlGcqzGfJBonffVrM4ordm3IiVm50Zvr25DrmYTKrQpJRB-KOvYxBNYDzjCaanHDyWGUGN44FUx38azHHEVBTaiOM7xwPeyCc-xTTv8WXGnL1xrhL3M_jNuwnbAjzL9X_li7KUSeYajwhGihdMZaHLYaqxh3NNnbPfYhR6sBxu8vaT1Sc4eE84QC4dV4OaAglPvrPdWL-DC7OYQyoPU8u9ggwUQHpFUzJyD549T_Tlgn-2Cw7kTe41VonH9HkoXGANDGtQCGTqTIEeFQJ3MDDucf5VteFP8_SJPfyJYxpStFt5U1AuULV9sXmpGQL_-GGFXowd0X0bHxFeo_eu1vm-oTqQQNbKRnyt5V3n4U9jhOUGnnIBy3JOG3DA2YhVJsHdlLZ9vaDpFYcxts4.SqYfES30FqVSufGbPZ6YXA \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/.drone.yml b/vendor/github.com/go-openapi/swag/.drone.yml deleted file mode 100644 index acf10fdfbd..0000000000 --- a/vendor/github.com/go-openapi/swag/.drone.yml +++ /dev/null @@ -1,32 +0,0 @@ -clone: - path: github.com/go-openapi/swag - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify - - go get -u github.com/mailru/easyjson - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore deleted file mode 100644 index 769c244007..0000000000 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/vendor/github.com/go-openapi/swag/.pullapprove.yml b/vendor/github.com/go-openapi/swag/.pullapprove.yml deleted file mode 100644 index 5ec183e224..0000000000 --- a/vendor/github.com/go-openapi/swag/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e3..0000000000 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/go-openapi/swag/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md deleted file mode 100644 index c1d3c196ca..0000000000 --- a/vendor/github.com/go-openapi/swag/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) - -Contains a bunch of helper functions: - -* convert between value and pointers for builtins -* convert from string to builtin -* fast json concatenation -* search in path -* load from file or http -* name manglin \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go deleted file mode 100644 index 28d9124106..0000000000 --- a/vendor/github.com/go-openapi/swag/convert.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "math" - "strconv" - "strings" -) - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive -func IsFloat64AJSONInteger(f float64) bool { - if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { - return false - } - - return f == float64(int64(f)) || f == float64(uint64(f)) -} - -var evaluatesAsTrue = map[string]struct{}{ - "true": struct{}{}, - "1": struct{}{}, - "yes": struct{}{}, - "ok": struct{}{}, - "y": struct{}{}, - "on": struct{}{}, - "selected": struct{}{}, - "checked": struct{}{}, - "t": struct{}{}, - "enabled": struct{}{}, -} - -// ConvertBool turn a string into a boolean -func ConvertBool(str string) (bool, error) { - _, ok := evaluatesAsTrue[strings.ToLower(str)] - return ok, nil -} - -// ConvertFloat32 turn a string into a float32 -func ConvertFloat32(str string) (float32, error) { - f, err := strconv.ParseFloat(str, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// ConvertFloat64 turn a string into a float64 -func ConvertFloat64(str string) (float64, error) { - return strconv.ParseFloat(str, 64) -} - -// ConvertInt8 turn a string into int8 boolean -func ConvertInt8(str string) (int8, error) { - i, err := strconv.ParseInt(str, 10, 8) - if err != nil { - return 0, err - } - return int8(i), nil -} - -// ConvertInt16 turn a string into a int16 -func ConvertInt16(str string) (int16, error) { - i, err := strconv.ParseInt(str, 10, 16) - if err != nil { - return 0, err - } - return int16(i), nil -} - -// ConvertInt32 turn a string into a int32 -func ConvertInt32(str string) (int32, error) { - i, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// ConvertInt64 turn a string into a int64 -func ConvertInt64(str string) (int64, error) { - return strconv.ParseInt(str, 10, 64) -} - -// ConvertUint8 turn a string into a uint8 -func ConvertUint8(str string) (uint8, error) { - i, err := strconv.ParseUint(str, 10, 8) - if err != nil { - return 0, err - } - return uint8(i), nil -} - -// ConvertUint16 turn a string into a uint16 -func ConvertUint16(str string) (uint16, error) { - i, err := strconv.ParseUint(str, 10, 16) - if err != nil { - return 0, err - } - return uint16(i), nil -} - -// ConvertUint32 turn a string into a uint32 -func ConvertUint32(str string) (uint32, error) { - i, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// ConvertUint64 turn a string into a uint64 -func ConvertUint64(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// FormatBool turns a boolean into a string -func FormatBool(value bool) string { - return strconv.FormatBool(value) -} - -// FormatFloat32 turns a float32 into a string -func FormatFloat32(value float32) string { - return strconv.FormatFloat(float64(value), 'f', -1, 32) -} - -// FormatFloat64 turns a float64 into a string -func FormatFloat64(value float64) string { - return strconv.FormatFloat(value, 'f', -1, 64) -} - -// FormatInt8 turns an int8 into a string -func FormatInt8(value int8) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt16 turns an int16 into a string -func FormatInt16(value int16) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt32 turns an int32 
into a string -func FormatInt32(value int32) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt64 turns an int64 into a string -func FormatInt64(value int64) string { - return strconv.FormatInt(value, 10) -} - -// FormatUint8 turns a uint8 into a string -func FormatUint8(value uint8) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint16 turns a uint16 into a string -func FormatUint16(value uint16) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint32 turns a uint32 into a string -func FormatUint32(value uint32) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint64 turns a uint64 into a string -func FormatUint64(value uint64) string { - return strconv.FormatUint(value, 10) -} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go deleted file mode 100644 index c95e4e78bd..0000000000 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ /dev/null @@ -1,595 +0,0 @@ -package swag - -import "time" - -// This file was taken from the aws go sdk - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil.
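Two behaviors of convert.go above are easy to miss: ConvertBool never returns an error (anything outside its truthy set simply maps to false), and IsFloat64AJSONInteger caps integers at ±(2^53−1), the largest range a float64-backed JSON number can represent exactly. A small usage sketch, assuming the upstream github.com/go-openapi/swag import path:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	for _, s := range []string{"yes", "on", "1", "nope"} {
		b, _ := swag.ConvertBool(s) // the error is always nil
		fmt.Printf("%q -> %v\n", s, b)
	}

	// 2^53-1 is still exact in a float64; 2^53 no longer is.
	fmt.Println(swag.IsFloat64AJSONInteger(9007199254740991)) // true
	fmt.Println(swag.IsFloat64AJSONInteger(9007199254740992)) // false
}
```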
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil.
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil.
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values into a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers into a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values into a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers into a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil.
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil.
-func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go deleted file mode 100644 index 6e9ec20fc6..0000000000 --- a/vendor/github.com/go-openapi/swag/json.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "bytes" - "encoding/json" - "reflect" - "strings" - "sync" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// DefaultJSONNameProvider the default cache for types -var DefaultJSONNameProvider = NewNameProvider() - -const comma = byte(',') - -var closers = map[byte]byte{ - '{': '}', - '[': ']', -} - -type ejMarshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -type ejUnmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller -// so it takes the fastest option available. 
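The pointer/value helpers above (taken, as the file comment says, from the AWS Go SDK) exist because Go cannot take the address of a literal inline; they make it easy to populate optional struct fields and to read them back nil-safely. For example (the patch type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

// patch uses pointers to distinguish "not set" (nil) from a zero value.
type patch struct {
	Name    *string
	Enabled *bool
}

func main() {
	p := patch{
		Name:    swag.String("metrics"), // &"metrics" would not compile
		Enabled: swag.Bool(false),       // explicitly set to false
	}
	// The *Value accessors return the zero value for nil pointers.
	fmt.Println(swag.StringValue(p.Name), swag.BoolValue(p.Enabled))
	fmt.Println(swag.StringValue(nil) == "") // true, no panic
}
```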
-func WriteJSON(data interface{}) ([]byte, error) { - if d, ok := data.(ejMarshaler); ok { - jw := new(jwriter.Writer) - d.MarshalEasyJSON(jw) - return jw.BuildBytes() - } - if d, ok := data.(json.Marshaler); ok { - return d.MarshalJSON() - } - return json.Marshal(data) -} - -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller -// so it takes the fastest option available -func ReadJSON(data []byte, value interface{}) error { - if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: data} - d.UnmarshalEasyJSON(jl) - return jl.Error() - } - if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(data) - } - return json.Unmarshal(data, value) -} - -// DynamicJSONToStruct converts an untyped json structure into a struct -func DynamicJSONToStruct(data interface{}, target interface{}) error { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := WriteJSON(data) - if err != nil { - return err - } - if err := ReadJSON(b, target); err != nil { - return err - } - return nil -} - -// ConcatJSON concatenates multiple json objects efficiently -func ConcatJSON(blobs ...[]byte) []byte { - if len(blobs) == 0 { - return nil - } - if len(blobs) == 1 { - return blobs[0] - } - - last := len(blobs) - 1 - var opening, closing byte - a := 0 - idx := 0 - buf := bytes.NewBuffer(nil) - - for i, b := range blobs { - if len(b) > 0 && opening == 0 { // is this an array or an object? - opening, closing = b[0], closers[b[0]] - } - - if opening != '{' && opening != '[' { - continue // don't know how to concatenate non container objects - } - - if len(b) < 3 { // yep empty but also the last one, so closing this thing - if i == last && a > 0 { - buf.WriteByte(closing) - } - continue - } - - idx = 0 - if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - buf.WriteByte(comma) - idx = 1 // this is not the first or the last so we want to drop the leading bracket - } - - if i != last { // not the last one, strip brackets - buf.Write(b[idx : len(b)-1]) - } else { // last one, strip only the leading bracket - buf.Write(b[idx:]) - } - a++ - } - // somehow it ended up being empty, so provide a default value - if buf.Len() == 0 { - buf.WriteByte(opening) - buf.WriteByte(closing) - } - return buf.Bytes() -} - -// ToDynamicJSON turns an object into a properly JSON typed structure -func ToDynamicJSON(data interface{}) interface{} { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, _ := json.Marshal(data) - var res interface{} - json.Unmarshal(b, &res) - return res -} - -// FromDynamicJSON turns an object into a properly JSON typed structure -func FromDynamicJSON(data, target interface{}) error { - b, _ := json.Marshal(data) - return json.Unmarshal(b, target) -} - -// NameProvider represents an object capable of translating from go property names -// to json property names -// This type is thread-safe.
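DynamicJSONToStruct and NameProvider above round the file out: the former retypes an untyped JSON structure through a marshal/unmarshal round trip, the latter builds a cached two-way index between Go field names and their json tag names. A usage sketch with a hypothetical server type:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
)

type server struct {
	Host string `json:"host"`
	Port int    `json:"port,omitempty"`
}

func main() {
	// Retype a generic map (e.g. decoded YAML) into a struct.
	untyped := map[string]interface{}{"host": "localhost", "port": 8080}
	var s server
	if err := swag.DynamicJSONToStruct(untyped, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", s) // {Host:localhost Port:8080}

	// Translate names in both directions; the index is built once per type.
	np := swag.NewNameProvider()
	jsonName, _ := np.GetJSONName(s, "Host") // "host"
	goName, _ := np.GetGoName(s, "port")     // "Port"
	fmt.Println(jsonName, goName)
}
```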
-type NameProvider struct { - lock *sync.Mutex - index map[reflect.Type]nameIndex -} - -type nameIndex struct { - jsonNames map[string]string - goNames map[string]string -} - -// NewNameProvider creates a new name provider -func NewNameProvider() *NameProvider { - return &NameProvider{ - lock: &sync.Mutex{}, - index: make(map[reflect.Type]nameIndex), - } -} - -func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { - for i := 0; i < tpe.NumField(); i++ { - targetDes := tpe.Field(i) - - if targetDes.PkgPath != "" { // unexported - continue - } - - if targetDes.Anonymous { // walk embedded structures tree down first - buildnameIndex(targetDes.Type, idx, reverseIdx) - continue - } - - if tag := targetDes.Tag.Get("json"); tag != "" { - - parts := strings.Split(tag, ",") - if len(parts) == 0 { - continue - } - - nm := parts[0] - if nm == "-" { - continue - } - if nm == "" { // empty string means we want to use the Go name - nm = targetDes.Name - } - - idx[nm] = targetDes.Name - reverseIdx[targetDes.Name] = nm - } - } -} - -func newNameIndex(tpe reflect.Type) nameIndex { - var idx = make(map[string]string, tpe.NumField()) - var reverseIdx = make(map[string]string, tpe.NumField()) - - buildnameIndex(tpe, idx, reverseIdx) - return nameIndex{jsonNames: idx, goNames: reverseIdx} -} - -// GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - - var res []string - for k := range names.jsonNames { - res = append(res, k) - } - return res -} - -// GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetJSONNameForType(tpe, name) -} - -// GetJSONNameForType gets the json name for a go property name on a given type -func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.goNames[name] - return nme, ok -} - -func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - n.lock.Lock() - defer n.lock.Unlock() - names := newNameIndex(tpe) - n.index[tpe] = names - return names -} - -// GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetGoNameForType(tpe, name) -} - -// GetGoNameForType gets the go name for a given type for a json property name -func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.jsonNames[name] - return nme, ok -} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go deleted file mode 100644 index 6dbc31330e..0000000000 --- a/vendor/github.com/go-openapi/swag/loading.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path) -} - -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { - return remote - } - return local -} - -func loadHTTPBytes(path string) ([]byte, error) { - resp, err := http.Get(path) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) - } - - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go deleted file mode 100644 index 8323fa37b6..0000000000 --- a/vendor/github.com/go-openapi/swag/net.go +++ /dev/null @@ -1,24 +0,0 @@ -package swag - -import ( - "net" - "strconv" -) - -// SplitHostPort splits a network address into a host and a port. -// The port is -1 when there is no port to be found -func SplitHostPort(addr string) (host string, port int, err error) { - h, p, err := net.SplitHostPort(addr) - if err != nil { - return "", -1, err - } - if p == "" { - return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} - } - - pi, err := strconv.Atoi(p) - if err != nil { - return "", -1, err - } - return h, pi, nil -} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go deleted file mode 100644 index 273e9fbed9..0000000000 --- a/vendor/github.com/go-openapi/swag/path.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
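LoadStrategy above only selects between the two loader functions, based on nothing more than an "http" prefix check (which also matches https URLs); the caller then invokes the returned loader with the same path. Alongside it, SplitHostPort is a thin wrapper over net.SplitHostPort that also parses the port. A sketch of both, with a stubbed-out remote loader:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/go-openapi/swag"
)

func main() {
	remote := func(p string) ([]byte, error) {
		return nil, fmt.Errorf("network disabled in this sketch: %s", p)
	}
	loader := swag.LoadStrategy("https://example.com/swagger.json", ioutil.ReadFile, remote)
	if _, err := loader("https://example.com/swagger.json"); err != nil {
		fmt.Println(err) // the remote loader was chosen
	}

	host, port, _ := swag.SplitHostPort("localhost:9090")
	fmt.Println(host, port) // localhost 9090
}
```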
- -package swag - -import ( - "os" - "path/filepath" - "runtime" - "strings" -) - -const ( - // GOPATHKey represents the env key for gopath - GOPATHKey = "GOPATH" -) - -// FindInSearchPath finds a package in a provided lists of paths -func FindInSearchPath(searchPath, pkg string) string { - pathsList := filepath.SplitList(searchPath) - for _, path := range pathsList { - if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { - if _, err := os.Stat(evaluatedPath); err == nil { - return evaluatedPath - } - } - } - return "" -} - -// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT -func FindInGoSearchPath(pkg string) string { - return FindInSearchPath(FullGoSearchPath(), pkg) -} - -// FullGoSearchPath gets the search paths for finding packages -func FullGoSearchPath() string { - allPaths := os.Getenv(GOPATHKey) - if allPaths != "" { - allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") - } else { - allPaths = runtime.GOROOT() - } - return allPaths -} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go deleted file mode 100644 index f2f76070bb..0000000000 --- a/vendor/github.com/go-openapi/swag/util.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
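FindInSearchPath above resolves a package to `<entry>/src/<pkg>` for each entry in a GOPATH-style list, following symlinks and returning the first directory that exists, or the empty string. Illustratively (the directories here are hypothetical, and the list separator is the platform's, ':' on Unix):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	dir := swag.FindInSearchPath("/home/user/go:/opt/go", "k8s.io/kube-state-metrics")
	if dir == "" {
		fmt.Println("package not found on the search path")
		return
	}
	fmt.Println(dir) // e.g. /home/user/go/src/k8s.io/kube-state-metrics
}
```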
- -package swag - -import ( - "math" - "reflect" - "regexp" - "sort" - "strings" -) - -// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698 -var commonInitialisms = map[string]bool{ - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UUID": true, - "UID": true, - "UI": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XSRF": true, - "XSS": true, -} -var initialisms []string - -func init() { - for k := range commonInitialisms { - initialisms = append(initialisms, k) - } - sort.Sort(sort.Reverse(byLength(initialisms))) -} - -// JoinByFormat joins a string array by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func JoinByFormat(data []string, format string) []string { - if len(data) == 0 { - return data - } - var sep string - switch format { - case "ssv": - sep = " " - case "tsv": - sep = "\t" - case "pipes": - sep = "|" - case "multi": - return data - default: - sep = "," - } - return []string{strings.Join(data, sep)} -} - -// SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func SplitByFormat(data, format string) []string { - if data == "" { - return nil - } - var sep string - switch format { - case "ssv": - sep = " " - case "tsv": - sep = "\t" - case "pipes": - sep = "|" - case "multi": - return nil - default: - sep = "," - } - var result []string - for _, s := range strings.Split(data, sep) { - if ts := strings.TrimSpace(s); ts != "" { - result = append(result, ts) - } - } - return result -} - -type byLength []string - -func (s byLength) Len() int { - return len(s) -} -func (s byLength) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byLength) Less(i, j int) bool { - return len(s[i]) < len(s[j]) -} - -// Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) (words []string) { - repl := strings.NewReplacer( - "@", "At ", - "&", "And ", - "|", "Pipe ", - "$", "Dollar ", - "!", "Bang ", - "-", " ", - "_", " ", - ) - - rex1 := regexp.MustCompile(`(\p{Lu})`) - rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) - - str = trim(str) - - // Convert dash and underscore to spaces - str = repl.Replace(str) - - // Split when uppercase is found (needed for Snake) - str = rex1.ReplaceAllString(str, " $1") - // check if consecutive single char things make up an initialism - - for _, k := range initialisms { - str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) - } - // Get the final list of words - words = rex2.FindAllString(str, -1) - - return -} - -// Removes leading whitespaces -func trim(str string) string { - return strings.Trim(str, " ") -} - -// Shortcut to strings.ToUpper() -func upper(str string) string { - return strings.ToUpper(trim(str)) -} - -// Shortcut to strings.ToLower() -func lower(str string) string { - return strings.ToLower(trim(str)) -} - -// ToFileName lowercases and underscores a go type name -func ToFileName(name string) string { - var out []string - for _, 
w := range split(name) { - out = append(out, lower(w)) - } - return strings.Join(out, "_") -} - -// ToCommandName lowercases and underscores a go type name -func ToCommandName(name string) string { - var out []string - for _, w := range split(name) { - out = append(out, lower(w)) - } - return strings.Join(out, "-") -} - -// ToHumanNameLower represents a code name as a human series of words -func ToHumanNameLower(name string) string { - var out []string - for _, w := range split(name) { - if !commonInitialisms[upper(w)] { - out = append(out, lower(w)) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized -func ToHumanNameTitle(name string) string { - var out []string - for _, w := range split(name) { - uw := upper(w) - if !commonInitialisms[uw] { - out = append(out, upper(w[:1])+lower(w[1:])) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToJSONName camelcases a name which can be underscored or pascal cased -func ToJSONName(name string) string { - var out []string - for i, w := range split(name) { - if i == 0 { - out = append(out, lower(w)) - continue - } - out = append(out, upper(w[:1])+lower(w[1:])) - } - return strings.Join(out, "") -} - -// ToVarName camelcases a name which can be underscored or pascal cased -func ToVarName(name string) string { - res := ToGoName(name) - if _, ok := commonInitialisms[res]; ok { - return lower(res) - } - if len(res) <= 1 { - return lower(res) - } - return lower(res[:1]) + res[1:] -} - -// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes -func ToGoName(name string) string { - var out []string - for _, w := range split(name) { - uw := upper(w) - mod := int(math.Min(float64(len(uw)), 2)) - if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { - uw = upper(w[:1]) + lower(w[1:]) - } - out = append(out, uw) - } - return strings.Join(out, "") -} - -// ContainsStringsCI searches a slice of strings for a case-insensitive match -func ContainsStringsCI(coll []string, item string) bool { - for _, a := range coll { - if strings.EqualFold(a, item) { - return true - } - } - return false -} - -type zeroable interface { - IsZero() bool -} - -// IsZero returns true when the value passed into the function is a zero value. -// This allows for safer checking of interface values. 
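The name-mangling helpers above all funnel through split(), which breaks an identifier on case changes, dashes, and underscores, then re-join the words for a target convention, preserving well-known initialisms (matched longest-first thanks to the reverse length sort). IsZero, described just above, complements them for nil-safe emptiness checks. Expected behavior, assuming the upstream import path:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("http_server"))       // HTTPServer
	fmt.Println(swag.ToJSONName("http_server"))     // httpServer
	fmt.Println(swag.ToFileName("SomeHTMLPage"))    // some_html_page
	fmt.Println(swag.ToCommandName("SomeHTMLPage")) // some-html-page

	fmt.Println(swag.IsZero(""), swag.IsZero([]string(nil))) // true true
}
```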
-func IsZero(data interface{}) bool { - // check for things that have an IsZero method instead - if vv, ok := data.(zeroable); ok { - return vv.IsZero() - } - // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { - case reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - case reflect.Struct, reflect.Array: - return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) - case reflect.Invalid: - return true - } - return false -} - -// CommandLineOptionsGroup represents a group of user-defined command line options -type CommandLineOptionsGroup struct { - ShortDescription string - LongDescription string - Options interface{} -} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a93..0000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575b35..3cd3249f70 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 04dcb88130..d9aa3c42d6 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. 
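The unrolled fast path added to DecodeVarint above changes speed only, not the wire format; the exported package-level helpers still round-trip the same bytes. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // 300 = 0b1_0010_1100: low seven bits go first, a set MSB flags continuation.
        b := proto.EncodeVarint(300) // [0xAC 0x02]
        x, n := proto.DecodeVarint(b)
        fmt.Println(x, n) // 300 2
    }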
// It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -61,7 +55,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +71,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +94,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -173,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -217,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. 
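The behaviour skipAndSave implemented by hand, parking unrecognized tags in XXX_unrecognized so that a later Marshal reproduces them, is preserved by the new table-driven unmarshaler. A sketch, where pb.OldMsg is a hypothetical generated type missing some fields present on the wire:

    var m pb.OldMsg                    // hypothetical generated message
    _ = proto.Unmarshal(newerWire, &m) // unknown tags land in m.XXX_unrecognized
    out, _ := proto.Marshal(&m)        // out still carries those unknown fields

The new DiscardUnknown entry point in discard.go, further below, is the explicit way to drop them.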
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -301,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -311,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
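The rewritten comments pin the contract down: proto.Unmarshal resets the target before decoding, proto.UnmarshalMerge folds decoded fields into existing data, and proto.Merge (now also behind proto.Clone) gives the same merge semantics in memory. A sketch with a hypothetical generated type pb.Example:

    dst := proto.Clone(src).(*pb.Example) // deep copy: Merge into a fresh message
    proto.Merge(dst, extra)               // scalars set in extra overwrite; repeated fields append

    _ = proto.Unmarshal(b1, dst)      // resets dst, then decodes b1
    _ = proto.UnmarshalMerge(b2, dst) // decodes b2 on top of what dst already holds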
+ // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -328,547 +375,54 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
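The deleted dec_new_map parsed the restricted wire format of a map entry: a nested message whose key is field 1 and whose value is field 2. The same bytes can be built from the exported primitives; here, one map[string]int32 entry mapping "a" to 1:

    entry := proto.NewBuffer(nil)
    _ = entry.EncodeVarint(uint64(1<<3 | proto.WireBytes))  // field 1 (key), length-delimited
    _ = entry.EncodeStringBytes("a")
    _ = entry.EncodeVarint(uint64(2<<3 | proto.WireVarint)) // field 2 (value), varint
    _ = entry.EncodeVarint(1)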
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
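An embedded message, on its own or in a repeated field, is a plain length-prefixed encoding of the inner message, which the surviving Buffer.EncodeMessage/DecodeMessage pair still exposes (pb.Example is again a hypothetical generated type):

    buf := proto.NewBuffer(nil)
    _ = buf.EncodeMessage(inner) // varint length, then the marshaled inner message

    var got pb.Example
    _ = buf.DecodeMessage(&got)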
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 0000000000..dea2617ced --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
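DiscardUnknown is the explicit counterpart to the preservation behaviour: it walks the message tree and clears everything parked in XXX_unrecognized. A sketch, with pb.Example again hypothetical:

    var m pb.Example
    _ = proto.Unmarshal(wire, &m) // unknown tags are preserved on decode
    proto.DiscardUnknown(&m)      // drop them recursively, nested messages included
    out, _ := proto.Marshal(&m)   // out no longer re-emits the unknown tags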
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8c1b8fd1f6..c27d35f866 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,7 +39,6 @@ import ( "errors" "fmt" "reflect" - "sort" ) // RequiredNotSetError is the error returned if Marshal is called with @@ -82,10 +81,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. 
func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,10 +165,6 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. @@ -177,10 +173,6 @@ func (p *Buffer) EncodeZigzag64(x uint64) error { return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - // EncodeZigzag32 writes a zigzag-encoded 32-bit integer // to the Buffer. // This is the format used for the sint32 protocol buffer type. @@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,326 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. 
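The switch-based SizeVarint above simply enumerates the 7-bits-per-byte thresholds, and the zigzag encoders keep mapping small negative numbers onto small varints. A quick check of both:

    fmt.Println(proto.SizeVarint(127), proto.SizeVarint(128)) // 1 2
    fmt.Println(proto.SizeVarint(1 << 21))                    // 4: 2^21 needs a fourth 7-bit group

    buf := proto.NewBuffer(nil)
    _ = buf.EncodeZigzag64(uint64(^int64(0))) // -1 zigzags to 1, a one-byte varint
    v, _ := buf.DecodeZigzag64()
    fmt.Println(int64(v)) // -1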
-func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -545,819 +219,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { - return err - } - v, _ := exts.extensionsRead() - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. 
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. 
- reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 8b16f951c7..d4db5a1c14 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -54,13 +54,17 @@ Equality is defined in this way: in a proto3 .proto file, fields are not "set"; specifically, zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. - Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. @@ -105,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -142,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -257,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -272,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 6b9b363746..816a3b9d6c 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. 
// If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -154,6 +167,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { @@ -178,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -204,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -249,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. 
- // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -335,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -351,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
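// A usage sketch of the two descriptor cases described above; msg and the
// generated descriptor pb.E_MyExt are hypothetical stand-ins:
//
//	ext, err := proto.GetExtension(msg, pb.E_MyExt) // decoded value; assert with ext.(*pb.MyExt)
//	raw, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: 125}) // raw encoded bytes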
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -387,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -404,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -438,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -472,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -493,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. 
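// A sketch of enumerating extensions per the contract above (msg is a
// hypothetical message value; for unregistered extensions only Field is set):
//
//	descs, err := proto.ExtensionDescs(msg)
//	for _, d := range descs {
//		if d.ExtensionType == nil {
//			// unregistered: d.Field is the only meaningful field
//		}
//	}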
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -522,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -549,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index ac4ddbc075..0e2191b8ad 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. @@ -266,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -274,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -310,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -344,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. 
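// A minimal usage sketch, assuming a generated message value msg:
//
//	buf := proto.NewBuffer(nil)
//	buf.SetDeterministic(true)
//	if err := buf.Marshal(msg); err != nil {
//		// handle the error
//	}
//	out := buf.Bytes() // stable across repeated calls for equal messages
//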
+// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -832,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -856,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -896,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index fd982decd6..3b6ca41d5e 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e16..b6cad90834 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. 
+func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. 
-func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
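// The reflect-based pointer accessors above compile only under the build tags
// declared at the top of this file; a sketch of selecting them explicitly:
//
//	go build -tags purego ./...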
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47c..d55a335d94 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
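+// The offset f is applied with unsafe pointer arithmetic, so it must be a valid byte offset within the struct.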
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
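+	// An interface value occupies two machine words (type word, data word); index [1] selects the data word.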
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 69ddda8d4b..f710adab09 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. 
-type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -187,36 +144,19 @@ type Properties struct { Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - 
p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + 
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. 
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,28 +469,65 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } // MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. 
var (
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000000..0f212b3029
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2681 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field // offset of XXX_unrecognized
+	extensions   field // offset of XXX_InternalExtensions
+	v1extensions field // offset of XXX_extensions
+	sizecache    field // offset of XXX_sizecache
+	initialized  int32 // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool  // uses message set wire format
+	hasmarshaler bool  // has custom marshaler
+	sync.RWMutex       // protect extElems map, also for initialization
+	extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
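+// It caches the field's wire tag, sizer, and marshaler so they are computed once per message type rather than on every call.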
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool   // field is required
+	name       string // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
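+	// (The message is marshaled into a throwaway buffer just to measure its length.)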
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errreq error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required && errreq == nil {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				errreq = &RequiredNotSetError{f.name}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errreq == nil {
+					errreq = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errreq
+}
+
+// computeMarshalInfo initializes the marshal info.
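+// It runs lazily on the first Size or Marshal of a message type; the initialized flag is
+// published with an atomic store so later callers can skip the work.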
+func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
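+	// tags[0] is the wire encoding, tags[1] the field number, tags[2] the cardinality (opt/req/rep).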
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
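+// The pair returned is selected by the field's Go kind and wire encoding, and by whether the
+// field is a pointer, a slice, or a packed slice.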
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed 
{ + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
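+// The naming convention is size/append + encoding + Value, ValueNoZero, Ptr, Slice, or
+// PackedSlice, mirroring the cases selected in typeMarshaler above.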
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
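+
+// Editor's note, illustration only: appendVarint above implements the
+// standard protobuf base-128 varint encoding -- seven payload bits per
+// byte, least-significant group first, with the continuation bit set on
+// every byte except the last. For example:
+//
+//	appendVarint(nil, 1)   // -> []byte{0x01}
+//	appendVarint(nil, 300) // -> []byte{0xac, 0x02} (300 = 0b10_0101100)
+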
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
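+//
+// Wire-format reminder (editor's illustration): each slice element is
+// written as the field tag, a varint holding the submessage's byte length,
+// and then the submessage body. A 5-byte submessage behind a one-byte tag
+// therefore costs 1 + 1 + 5 = 7 bytes, which is exactly the per-element
+// siz + SizeVarint(uint64(siz)) + tagsize computed by the sizer below.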
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. 
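+//
+// Editor's illustration with hypothetical protoc-gen-go output: a oneof
+// field is declared as an interface in the message struct, and each case is
+// its own single-field wrapper struct, e.g.
+//
+//	type Msg struct {
+//		Value isMsg_Value // oneof
+//	}
+//	type Msg_Name struct {
+//		Name string `protobuf:"bytes,1,opt,name=name,oneof"`
+//	}
+//
+// The marshaler below unwraps the interface, recovers the concrete wrapper
+// type via reflection, and dispatches to the sizer/marshaler registered for
+// that case in fi.oneofElems.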
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. 
+// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..5525def6a5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src are of the same type and are non-nil.
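+//
+// Merge semantics, illustrated (editor's note, with a hypothetical message
+// type Foo): set scalar fields in src overwrite dst, repeated fields are
+// appended, and embedded messages are merged recursively:
+//
+//	dst := &Foo{Name: "a", Tags: []string{"x"}}
+//	src := &Foo{Tags: []string{"y"}}
+//	proto.Merge(dst, src) // dst now has Name "a" and Tags ["x", "y"]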
+func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceHeader or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int32{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
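+ // For orientation, the simplified shape of generated oneof code
+ // (names illustrative): a oneof field F with a case X compiles to
+ // roughly
+ //	type isMsg_F interface{ isMsg_F() }
+ //	type Msg_X struct{ X int64 }
+ // The closure below copies the wrapper's sole field (Field(0)),
+ // allocating dst's wrapper when it is nil or holds a different case.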
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..55f0340a3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,1967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. 
+// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. 
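+ // reqMask carries one bit per required field, in u.reqFields order;
+ // the first cleared bit identifies the missing field. For example,
+ // with reqFields = {"a", "b", "c"} and reqMask = 0b101, the loop
+ // below returns RequiredNotSetError{"b"}.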
+ for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
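+ // Only the last result of XXX_OneofFuncs is consumed below: a
+ // []interface{} of nil wrapper pointers, e.g. (*Msg_X)(nil) (name
+ // illustrative), which lets each concrete oneof case be matched, via
+ // Implements, to the interface-typed field it belongs to.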
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] + tag, err := strconv.Atoi(tagstr) + if err != nil { + panic("protobuf tag field not an integer: " + tagstr) + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(tag, of.field, unmarshal, 0) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0) + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types. 
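+ // Packaging examples: *int32 unwraps to (pointer, int32), []int32 to
+ // (slice, int32), and []*Msg to (pointer+slice, struct). Only struct
+ // elements may legitimately combine both, hence the panic below.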
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
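+// Every unmarshaler shares one calling convention: b starts just past
+// the field's key varint (tag<<3|wiretype), f addresses the field
+// inside the message, w is the already-decoded wire type, and the
+// remaining bytes are returned. Schematically, the varint-valued ones
+// all follow this shape:
+//
+//	x, n := decodeVarint(b)
+//	if n == 0 {
+//		return nil, io.ErrUnexpectedEOF
+//	}
+//	// ...convert x and store it through f...
+//	return b[n:], nil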
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
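+ // (E.g. 0x01 and the over-long 0x81 0x00 both decode to true here.)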
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
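+ // As in unmarshalBytesValue above, the append onto emptyBuf copies
+ // the payload, so the stored element never aliases the input buffer b.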
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. 
Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
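+// Worked example: called just past a field-1 StartGroup key, the buffer
+// []byte{0x0c} (the key 1<<3|WireEndGroup) yields (0, 1): the matching
+// EndGroup key occupies b[0:1].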
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf03..2205fdaadf 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. 
for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 7e6f145a10..0685bae36d 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
func (p *textParser) back() { p.backed = true } @@ -592,7 +578,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -724,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -792,12 +785,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: - // Either "true", "false", 1 or 0. + // true/1/t/True or false/f/0/False. switch tok.value { - case "true", "1": + case "true", "1", "t", "True": fv.SetBool(true) return nil - case "false", "0": + case "false", "0", "f", "False": fv.SetBool(false) return nil } @@ -879,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 89e07ae192..b2af97f4a9 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/" // function. AnyMessageName is provided for less common use cases like filtering a // sequence of Any messages based on a set of allowed message type names. func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } slash := strings.LastIndex(any.TypeUrl, "/") if slash < 0 { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f2c6906b91..4ab3551b7f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,6 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/any/any.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - github.com/golang/protobuf/ptypes/any/any.proto - -It has these top-level messages: - Any -*/ package any import proto "github.com/golang/protobuf/proto" @@ -63,6 +53,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... 
+// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' @@ -123,33 +123,69 @@ type Any struct { // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, - 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, - 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, - 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, - 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, - 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, - 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, - 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, - 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 
0xf6, 0x00, 0x00, 0x00, +var fileDescriptor_any_744b9ca530f228db = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 81dcf46ccf..c748667623 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/any"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -75,6 +74,16 @@ option objc_class_prefix = "GPB"; // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 569748346d..0333aebfef 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,6 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - github.com/golang/protobuf/ptypes/duration/duration.proto - -It has these top-level messages: - Duration -*/ package duration import proto "github.com/golang/protobuf/proto" @@ -35,6 +25,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. 
// +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -69,10 +61,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 @@ -80,35 +89,71 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) + 
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) } -var fileDescriptor0 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, - 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, - 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, - 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, - 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, - 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, - 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, - 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, - 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto index 96c1796d65..975fce41aa 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -33,11 +33,11 @@ syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/duration"; option java_package = "com.google.protobuf"; option java_outer_classname = "DurationProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Duration represents a signed, fixed-length span of time represented @@ -47,6 +47,8 @@ option objc_class_prefix = "GPB"; // two Timestamp values is a Duration and it can be added or subtracted 
// from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -81,11 +83,28 @@ option objc_class_prefix = "GPB"; // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years int64 seconds = 1; // Signed fractions of a second at nanosecond resolution of the span diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100755 index 2a5b4e8bdc..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=' - any.proto - duration.proto - empty.proto - struct.proto - timestamp.proto - wrappers.proto -' - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir -# Pass 1: build mapping from upstream filename to our filename. -declare -A filename_map -for f in $(cd $PKG && find * -name '*.proto'); do - echo -n 1>&2 "looking for latest version of $f... " - up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) - echo 1>&2 $up - if [ $(echo $up | wc -w) != "1" ]; then - die "not exactly one match" - fi - filename_map[$up]=$f -done -# Pass 2: copy files -for up in "${!filename_map[@]}"; do - f=${filename_map[$up]} - shortname=$(basename $f | sed 's,\.proto$,,') - cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f -done - -# Run protoc once per package. -for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. 
$dir/*.proto -done -echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 1b36576220..47f10dbc2c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { return t, validateTimestamp(ts) } +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index ffcc51594a..b590be7b5f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,6 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -It has these top-level messages: - Timestamp -*/ package timestamp import proto "github.com/golang/protobuf/proto" @@ -40,6 +30,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -77,51 +69,107 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) } -var fileDescriptor0 = []byte{ - // 194 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, - 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, - 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 
0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, - 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, - 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, - 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, - 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, - 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, - 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, - 0x00, 0x00, +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index 7992a85886..06750ab1f1 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -38,7 +38,6 @@ option go_package = "github.com/golang/protobuf/ptypes/timestamp"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone @@ -53,6 +52,8 @@ option objc_class_prefix = "GPB"; // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -90,16 +91,37 @@ option objc_class_prefix = "GPB"; // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index eb74b1d39c..fc5aaaa13a 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -22,7 +22,7 @@ // See some discussion on the matter here: // http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html // Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. +// implmentation written about there. // // Within this tree, each node contains a slice of items and a (possibly nil) // slice of children. For basic numeric values or raw structs, this can cause @@ -44,7 +44,7 @@ // widely used ordered tree implementation in the Go ecosystem currently. // Its functions, therefore, exactly mirror those of // llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. +// support storing multiple equivalent values or backwards iteration. package btree import ( @@ -52,7 +52,6 @@ import ( "io" "sort" "strings" - "sync" ) // Item represents a single object in the tree. @@ -69,17 +68,11 @@ const ( DefaultFreeListSize = 32 ) -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - // FreeList represents a free list of btree nodes. By default each // BTree has its own FreeList, but multiple BTrees can share the same // FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. +// Two Btrees using the same freelist are not safe for concurrent write access. 
type FreeList struct { - mu sync.Mutex freelist []*node } @@ -90,25 +83,18 @@ func NewFreeList(size int) *FreeList { } func (f *FreeList) newNode() (n *node) { - f.mu.Lock() index := len(f.freelist) - 1 if index < 0 { - f.mu.Unlock() return new(node) } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() + f.freelist, n = f.freelist[:index], f.freelist[index] return } func (f *FreeList) freeNode(n *node) { - f.mu.Lock() if len(f.freelist) < cap(f.freelist) { f.freelist = append(f.freelist, n) } - f.mu.Unlock() } // ItemIterator allows callers of Ascend* to iterate in-order over portions of @@ -130,8 +116,8 @@ func NewWithFreeList(degree int, f *FreeList) *BTree { panic("bad degree") } return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, + degree: degree, + freelist: f, } } @@ -152,8 +138,8 @@ func (s *items) insertAt(index int, item Item) { // back. func (s *items) removeAt(index int) Item { item := (*s)[index] + (*s)[index] = nil copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil *s = (*s)[:len(*s)-1] return item } @@ -167,16 +153,6 @@ func (s *items) pop() (out Item) { return } -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - // find returns the index where the given item should be inserted into this // list. 'found' is true if the item already exists in the list at the given // index. @@ -207,8 +183,8 @@ func (s *children) insertAt(index int, n *node) { // back. func (s *children) removeAt(index int) *node { n := (*s)[index] + (*s)[index] = nil copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil *s = (*s)[:len(*s)-1] return n } @@ -222,16 +198,6 @@ func (s *children) pop() (out *node) { return } -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - // node is an internal node in a tree. // // It must at all times maintain the invariant that either @@ -240,34 +206,7 @@ func (s *children) truncate(index int) { type node struct { items items children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c + t *BTree } // split splits the given node at the given index. The current node shrinks, @@ -275,12 +214,12 @@ func (n *node) mutableChild(i int) *node { // containing all items/children after it. 
func (n *node) split(i int) (Item, *node) { item := n.items[i] - next := n.cow.newNode() + next := n.t.newNode() next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) + n.items = n.items[:i] if len(n.children) > 0 { next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) + n.children = n.children[:i+1] } return item, next } @@ -291,7 +230,7 @@ func (n *node) maybeSplitChild(i, maxItems int) bool { if len(n.children[i].items) < maxItems { return false } - first := n.mutableChild(i) + first := n.children[i] item, second := first.split(maxItems / 2) n.items.insertAt(i, item) n.children.insertAt(i+1, second) @@ -325,7 +264,7 @@ func (n *node) insert(item Item, maxItems int) Item { return out } } - return n.mutableChild(i).insert(item, maxItems) + return n.children[i].insert(item, maxItems) } // get finds the given key in the subtree and returns it. @@ -403,10 +342,10 @@ func (n *node) remove(item Item, minItems int, typ toRemove) Item { panic("invalid type") } // If we get to here, we have children. - if len(n.children[i].items) <= minItems { + child := n.children[i] + if len(child.items) <= minItems { return n.growChildAndRemove(i, item, minItems, typ) } - child := n.mutableChild(i) // Either we had enough items to begin with, or we've done some // merging/stealing, because we've got enough now and we're ready to return // stuff. @@ -445,10 +384,10 @@ func (n *node) remove(item Item, minItems int, typ toRemove) Item { // whether we're in case 1 or 2), we'll have enough items and can guarantee // that we hit case A. func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + child := n.children[i] if i > 0 && len(n.children[i-1].items) > minItems { // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) + stealFrom := n.children[i-1] stolenItem := stealFrom.items.pop() child.items.insertAt(0, n.items[i-1]) n.items[i-1] = stolenItem @@ -457,8 +396,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) } } else if i < len(n.items) && len(n.children[i+1].items) > minItems { // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) + stealFrom := n.children[i+1] stolenItem := stealFrom.items.removeAt(0) child.items = append(child.items, n.items[i]) n.items[i] = stolenItem @@ -468,90 +406,47 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) } else { if i >= len(n.items) { i-- + child = n.children[i] } - child := n.mutableChild(i) // merge with right child mergeItem := n.items.removeAt(i) mergeChild := n.children.removeAt(i + 1) child.items = append(child.items, mergeItem) child.items = append(child.items, mergeChild.items...) child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) + n.t.freeNode(mergeChild) } return n.remove(item, minItems, typ) } -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - // iterate provides a simple method for iterating over elements in the tree. +// It could probably use some work to be extra-efficient (it calls from() a +// little more than it should), but it works pretty well for now. // -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. 
Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok bool - switch dir { - case ascend: - for i := 0; i < len(n.items); i++ { - if start != nil && n.items[i].Less(start) { - continue - } - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } +// It requires that 'from' and 'to' both return true for values we should hit +// with the iterator. It should also be the case that 'from' returns true for +// values less than or equal to values 'to' returns true for, and 'to' +// returns true for values greater than or equal to those that 'from' +// does. +func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool { + for i, item := range n.items { + if !from(item) { + continue } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } + if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) { + return false } - case descend: - for i := len(n.items) - 1; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } + if !to(item) { + return false } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } + if !iter(item) { + return false } } - return hit, true + if len(n.children) > 0 { + return n.children[len(n.children)-1].iterate(from, to, iter) + } + return true } // Used for testing/debugging purposes. @@ -570,54 +465,12 @@ func (n *node) print(w io.Writer, level int) { // Write operations are not safe for concurrent mutation by multiple // goroutines, but Read operations are. type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. 
-type copyOnWriteContext struct { + degree int + length int + root *node freelist *FreeList } -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - // maxItems returns the max number of items to allow per node. func (t *BTree) maxItems() int { return t.degree*2 - 1 @@ -629,20 +482,23 @@ func (t *BTree) minItems() int { return t.degree - 1 } -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c +func (t *BTree) newNode() (n *node) { + n = t.freelist.newNode() + n.t = t return } -func (c *copyOnWriteContext) freeNode(n *node) { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - c.freelist.freeNode(n) +func (t *BTree) freeNode(n *node) { + for i := range n.items { + n.items[i] = nil // clear to allow GC + } + n.items = n.items[:0] + for i := range n.children { + n.children[i] = nil // clear to allow GC } + n.children = n.children[:0] + n.t = nil // clear to allow GC + t.freelist.freeNode(n) } // ReplaceOrInsert adds the given item to the tree. 
If an item in the tree @@ -655,19 +511,16 @@ func (t *BTree) ReplaceOrInsert(item Item) Item { panic("nil item being added to BTree") } if t.root == nil { - t.root = t.cow.newNode() + t.root = t.newNode() t.root.items = append(t.root.items, item) t.length++ return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } + } else if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) } out := t.root.insert(item, t.maxItems()) if out == nil { @@ -698,12 +551,11 @@ func (t *BTree) deleteItem(item Item, typ toRemove) Item { if t.root == nil || len(t.root.items) == 0 { return nil } - t.root = t.root.mutableFor(t.cow) out := t.root.remove(item, t.minItems(), typ) if len(t.root.items) == 0 && len(t.root.children) > 0 { oldroot := t.root t.root = t.root.children[0] - t.cow.freeNode(oldroot) + t.freeNode(oldroot) } if out != nil { t.length-- @@ -717,7 +569,10 @@ func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator if t.root == nil { return } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) + t.root.iterate( + func(a Item) bool { return !a.Less(greaterOrEqual) }, + func(a Item) bool { return a.Less(lessThan) }, + iterator) } // AscendLessThan calls the iterator for every value in the tree within the range @@ -726,7 +581,10 @@ func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, nil, pivot, false, false, iterator) + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return a.Less(pivot) }, + iterator) } // AscendGreaterOrEqual calls the iterator for every value in the tree within @@ -735,7 +593,10 @@ func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, pivot, nil, true, false, iterator) + t.root.iterate( + func(a Item) bool { return !a.Less(pivot) }, + func(a Item) bool { return true }, + iterator) } // Ascend calls the iterator for every value in the tree within the range @@ -744,43 +605,10 @@ func (t *BTree) Ascend(iterator ItemIterator) { if t.root == nil { return } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. 
-func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) + t.root.iterate( + func(a Item) bool { return true }, + func(a Item) bool { return true }, + iterator) } // Get looks for the key item in the tree, returning it. It returns nil if diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 68fcf2cabb..64869af347 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -14,21 +14,21 @@ This is useful for testing: Import with ```import "github.com/google/gofuzz"``` You can use it on single variables: -``` +```go f := fuzz.New() var myInt int f.Fuzz(&myInt) // myInt gets a random value. ``` You can use it on maps: -``` +```go f := fuzz.New().NilChance(0).NumElements(1, 1) var myMap map[ComplexKeyType]string f.Fuzz(&myMap) // myMap will have exactly one element. ``` Customize the chance of getting a nil pointer: -``` +```go f := fuzz.New().NilChance(.5) var fancyStruct struct { A, B, C, D *string @@ -37,7 +37,7 @@ f.Fuzz(&fancyStruct) // About half the pointers should be set. ``` You can even customize the randomization completely if needed: -``` +```go type MyEnum string const ( A MyEnum = "A" diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 42d9a48b3e..4f888fbc8f 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -129,7 +129,7 @@ func (f *Fuzzer) genElementCount() int { if f.minElements == f.maxElements { return f.minElements } - return f.minElements + f.r.Intn(f.maxElements-f.minElements) + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) } func (f *Fuzzer) genShouldFill() bool { @@ -229,12 +229,19 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { return } v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if f.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + f.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { f.doFuzz(v.Field(i), 0) } - case reflect.Array: - fallthrough case reflect.Chan: fallthrough case reflect.Func: diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index 2d4b3303db..604a46a6a1 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -15,7 +15,6 @@ package compiler import ( - "errors" "fmt" "gopkg.in/yaml.v2" "io/ioutil" @@ -54,16 +53,11 @@ func FetchFile(fileurl string) ([]byte, error) { } return bytes, nil } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } + log.Printf("Fetching %s", fileurl) response, err := http.Get(fileurl) if err != nil { return nil, err } - if response.StatusCode != 200 { - return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) - } defer response.Body.Close() bytes, err = ioutil.ReadAll(response.Body) if err == nil { diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go 
b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index 7c6b914967..b14f1f945f 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -193,7 +193,7 @@ func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore new file mode 100644 index 0000000000..df9048a010 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -0,0 +1,2 @@ +**/*.swp +.idea diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml new file mode 100644 index 0000000000..59c4194952 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -0,0 +1,21 @@ +language: go +sudo: false +install: +- go get golang.org/x/crypto/ssh +- go get -v -tags 'fixtures acceptance' ./... +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get golang.org/x/tools/cmd/goimports +go: +- 1.8 +- tip +env: + global: + - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" +before_script: +- go vet ./... 
+script:
+- ./script/coverage
+- ./script/format
+after_success:
+- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out
diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml
new file mode 100644
index 0000000000..c259d03e18
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml
@@ -0,0 +1,12 @@
+- project:
+    name: gophercloud/gophercloud
+    check:
+      jobs:
+        - gophercloud-unittest
+        - gophercloud-acceptance-test
+    recheck-mitaka:
+      jobs:
+        - gophercloud-acceptance-test-mitaka
+    recheck-pike:
+      jobs:
+        - gophercloud-acceptance-test-pike
diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md b/vendor/github.com/gophercloud/gophercloud/FAQ.md
new file mode 100644
index 0000000000..88a366a288
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/FAQ.md
@@ -0,0 +1,148 @@
+# Tips
+
+## Implementing default logging and re-authentication attempts
+
+You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client
+like the following and setting it as the provider client's HTTP Client (via the
+`gophercloud.ProviderClient.HTTPClient` field):
+
+```go
+//...
+
+// LogRoundTripper satisfies the http.RoundTripper interface and is used to
+// customize the default Gophercloud RoundTripper to allow for logging.
+type LogRoundTripper struct {
+	rt                http.RoundTripper
+	numReauthAttempts int
+}
+
+// newHTTPClient returns a custom HTTP client that allows for logging relevant
+// information before and after the HTTP request.
+func newHTTPClient() http.Client {
+	return http.Client{
+		Transport: &LogRoundTripper{
+			rt: http.DefaultTransport,
+		},
+	}
+}
+
+// RoundTrip performs a round-trip HTTP request and logs relevant information about it.
+func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
+	glog.Infof("Request URL: %s\n", request.URL)
+
+	response, err := lrt.rt.RoundTrip(request)
+	if response == nil {
+		return nil, err
+	}
+
+	if response.StatusCode == http.StatusUnauthorized {
+		if lrt.numReauthAttempts == 3 {
+			return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.")
+		}
+		lrt.numReauthAttempts++
+	}
+
+	glog.Debugf("Response Status: %s\n", response.Status)
+
+	return response, nil
+}
+
+endpoint := "https://127.0.0.1/auth"
+pc := openstack.NewClient(endpoint)
+pc.HTTPClient = newHTTPClient()
+
+//...
+```
+
+
+## Implementing custom objects
+
+OpenStack request/response objects may differ among variable names or types.
+
+### Custom request objects
+
+To pass custom options to a request, implement the desired `OptsBuilder` interface. For
+example, to pass in
+
+```go
+type MyCreateServerOpts struct {
+	Name string
+	Size int
+}
+```
+
+to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface:
+
+```go
+func (o MyCreateServerOpts) ToServerCreateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{
+		"name": o.Name,
+		"size": o.Size,
+	}, nil
+}
+```
+
+create an instance of your custom options object, and pass it to `servers.Create`:
+
+```go
+// ...
+myOpts := MyCreateServerOpts{
+	Name: "s1",
+	Size: 100,
+}
+server, err := servers.Create(computeClient, myOpts).Extract()
+// ...
+```
+
+### Custom response objects
+
+Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be
+combined to create a custom object:
+
+```go
+// ...
+type MyVolume struct {
+	volumes.Volume
+	tenantattr.VolumeExt
+}
+
+var v struct {
+	MyVolume `json:"volume"`
+}
+
+err := volumes.Get(client, volID).ExtractInto(&v)
+// ...
+```
+
+## Overriding default `UnmarshalJSON` method
+
+For some response objects, a field may be a custom type or may be allowed to take on
+different types. In these cases, overriding the default `UnmarshalJSON` method may be
+necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON`
+method on the type:
+
+```go
+// ...
+type MyVolume struct {
+	ID          string    `json:"id"`
+	TimeCreated time.Time `json:"-"`
+}
+
+func (r *MyVolume) UnmarshalJSON(b []byte) error {
+	type tmp MyVolume
+	var s struct {
+		tmp
+		TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = MyVolume(s.tmp)
+
+	r.TimeCreated = time.Time(s.TimeCreated)
+
+	return err
+}
+// ...
+```
diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE
new file mode 100644
index 0000000000..fbbbc9e4cb
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/LICENSE
@@ -0,0 +1,191 @@
+Copyright 2012-2013 Rackspace, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+------
+
+                                Apache License
+                          Version 2.0, January 2004
+                       http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md new file mode 100644 index 0000000000..aa383c9cc9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md @@ -0,0 +1,32 @@ +# Compute + +## Floating IPs + +* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` +* `floatingips.Associate` and `floatingips.Disassociate` have been removed. +* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. + +## Security Groups + +* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. +* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. + +## Servers + +* `servers.Reboot` now requires a `servers.RebootOpts` struct: + + ```golang + rebootOpts := &servers.RebootOpts{ + Type: servers.SoftReboot, + } + res := servers.Reboot(client, server.ID, rebootOpts) + ``` + +# Identity + +## V3 + +### Tokens + +* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of + `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md new file mode 100644 index 0000000000..bb218c3fe9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -0,0 +1,159 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) +[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) + +Gophercloud is an OpenStack Go SDK. 
+ +## Useful links + +* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/gophercloud/gophercloud + +# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" + +godep save ./... +``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. The first method is recommended because it decouples credential +information from source code, allowing you to push the latter to your version +control system without any security risk. + +You will need to retrieve the following: + +* username +* password +* a valid Keystone identity URL + +For users that have the OpenStack dashboard installed, there's a shortcut. If +you visit the `project/access_and_security` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you will +download a bash file that exports all of your access details to environment +variables. To execute the file, run `source admin-openrc.sh` and you will be +prompted for your password. + +### Authentication + +Once you have access to your credentials, you can begin plugging them into +Gophercloud. The next step is authentication, and this is handled by a base +"Provider" struct. To get one, you can either pass in your credentials +explicitly, or tell Gophercloud to use environment variables: + +```go +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +// Option 1: Pass in the values yourself +opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", +} + +// Option 2: Use a utility function to retrieve all your environment variables +opts, err := openstack.AuthOptionsFromEnv() +``` + +Once you have the `opts` variable, you can pass it in and get back a +`ProviderClient` struct: + +```go +provider, err := openstack.AuthenticatedClient(opts) +``` + +The `ProviderClient` is the top-level client that all of your OpenStack services +derive from. The provider contains all of the authentication details that allow +your Go code to access the API - such as the base URL and token ID. + +### Provision a server + +Once we have a base Provider, we inject it as a dependency into each OpenStack +service. 
In order to work with the Compute API, we need a Compute service +client, which can be created like so: + +```go +client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), +}) +``` + +We then use this `client` for any Compute API operation we want. In our case, +we want to provision a new server - so we invoke the `Create` method and pass +in the flavor ID (hardware specification) and image ID (operating system) we're +interested in: + +```go +import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + +server, err := servers.Create(client, servers.CreateOpts{ + Name: "My new server!", + FlavorRef: "flavor_id", + ImageRef: "image_id", +}).Extract() +``` + +The above code sample creates a new server with the given parameters and stores the +new resource in the `server` variable (a +[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). + +## Advanced Usage + +Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works. + +## Backwards-Compatibility Guarantees + +None. Vendor it and write tests covering the parts you use. + +## Contributing + +See the [contributing guide](./.github/CONTRIBUTING.md). + +## Help and feedback + +If you're struggling with something or have spotted a potential bug, feel free +to submit an issue to our [bug tracker](/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. + +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md new file mode 100644 index 0000000000..22a2900941 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md @@ -0,0 +1,79 @@ + +## On Pull Requests + +- Please make sure to read our [contributing guide](/.github/CONTRIBUTING.md). + +- Before you start a PR, there needs to be a Github issue and a discussion about it + on that issue with a core contributor, even if it's just a 'SGTM'. + +- A PR's description must reference the issue it closes with a `For <issue-number>` (e.g. For #293). + +- A PR's description must contain link(s) to the line(s) in the OpenStack + source code (on Github) that prove(s) the PR code to be valid. Links to documentation + are not good enough. The link(s) should be to a non-`master` branch. For example, + a pull request implementing the creation of a Neutron v2 subnet might put the + following link in the description: + + https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 + + From that link, a reviewer (or user) can verify the fields in the request/response + objects in the PR. + +- A PR that is in-progress should have `[wip]` in front of the PR's title. When + ready for review, remove the `[wip]` and ping a core contributor with an `@`. + +- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with + one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] + prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the + [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will + let reviewers know it is ready to review. + +- A PR should be small.
Even if you intend to implement an entire + service, a PR should only be one route of that service + (e.g. create server or get server, but not both). + +- Unless explicitly asked, do not squash commits in the middle of a review; only + append. It makes it difficult for the reviewer to see what's changed from one + review to the next. + +- See [#583](https://github.com/gophercloud/gophercloud/issues/583) as an example of a + well-formatted issue which contains all relevant information we need to review and approve. + +## On Code + +- Regarding design: follow as closely as is reasonable the code already in the library. + Most operations (e.g. create, delete) admit the same design. + +- Unit tests and acceptance (integration) tests must be written to cover each PR. + Tests for operations with several options (e.g. list, create) should include all + the options in the tests. This will allow users to verify an operation on their + own infrastructure and see an example of usage. + +- If in doubt, ask in-line on the PR. + +### File Structure + +- The following should be used in most cases: + + - `requests.go`: contains all the functions that make HTTP requests and the + types associated with the HTTP request (parameters for URL, body, etc) + - `results.go`: contains all the response objects and their methods + - `urls.go`: contains the endpoints to which the requests are made + +### Naming + +- For methods on a type in `results.go`, the receiver should be named `r` and the + variable into which it will be unmarshalled `s`. + +- Functions in `requests.go`, with the exception of functions that return a + `pagination.Pager`, should use a named return value `r`. + +- Functions in `requests.go` that accept request bodies should accept as their + last parameter an `interface` named `<Action>OptsBuilder` (e.g. `CreateOptsBuilder`). + This `interface` should have at least a method named `To<Resource><Action>Map` + (e.g. `ToPortCreateMap`). + +- Functions in `requests.go` that accept query strings should accept as their + last parameter an `interface` named `<Action>OptsBuilder` (e.g. `ListOptsBuilder`). + This `interface` should have at least a method named `To<Resource>ListQuery` + (e.g. `ToServerListQuery`). diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go new file mode 100644 index 0000000000..4211470020 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -0,0 +1,354 @@ +package gophercloud + +/* +AuthOptions stores information needed to authenticate to an OpenStack Cloud. +You can populate one manually, or use a provider's AuthOptionsFromEnv() function +to read relevant information from the standard environment variables. Pass one +to a provider's AuthenticatedClient function to authenticate and obtain a +ProviderClient representing an active session on that provider. + +Its fields are the union of those recognized by each identity implementation and +provider.
+ +An example of manually providing authentication information: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +An example of using AuthOptionsFromEnv(), where the environment variables can +be read from a file, such as a standard openrc file: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed by + // all of the identity services, it will often be populated by a provider-level + // function. + // + // The IdentityEndpoint is typically referred to as the "auth_url" or + // "OS_AUTH_URL" in the information provided by the cloud operator. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username string `json:"username,omitempty"` + UserID string `json:"-"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either is optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // The same fields are known as project_id and project_name in the Identity + // V3 API, but are collected as TenantID and TenantName here in both cases. + // Some providers allow you to specify a TenantName instead of the TenantID. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + // If DomainID or DomainName are provided, they will also apply to TenantName. + // It is not currently possible to authenticate with Username and a Domain + // and scope to a Project in a different Domain by using TenantName. To + // accomplish that, the ProjectID will need to be provided as the TenantID + // option. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud to + // cache your credentials in memory, and to allow Gophercloud to attempt to + // re-authenticate automatically if/when your token expires. If you set it to + // false, it will not cache these settings, but re-authentication will not be + // possible. This setting defaults to false. + // + // NOTE: The reauth function will try to re-authenticate endlessly if left + // unchecked. The way to limit the number of attempts is to provide a custom + // HTTP client to the provider client and provide a transport that implements + // the RoundTripper interface and stores the number of failed retries. For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID.
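+	// +	// A minimal sketch of token-based authentication (the endpoint and token +	// values are placeholders): +	// +	//	opts := gophercloud.AuthOptions{ +	//		IdentityEndpoint: "https://openstack.example.com:5000/v3", +	//		TokenID:          "{token_id}", +	//	} +	//	provider, err := openstack.AuthenticatedClient(opts)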
+ TokenID string `json:"-"` +} + +// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder +// interface in the v2 tokens package +func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + // Populate the request map. + authMap := make(map[string]interface{}) + + if opts.Username != "" { + if opts.Password != "" { + authMap["passwordCredentials"] = map[string]interface{}{ + "username": opts.Username, + "password": opts.Password, + } + } else { + return nil, ErrMissingInput{Argument: "Password"} + } + } else if opts.TokenID != "" { + authMap["token"] = map[string]interface{}{ + "id": opts.TokenID, + } + } else { + return nil, ErrMissingInput{Argument: "Username"} + } + + if opts.TenantID != "" { + authMap["tenantId"] = opts.TenantID + } + if opts.TenantName != "" { + authMap["tenantName"] = opts.TenantName + } + + return map[string]interface{}{"auth": authMap}, nil +} + +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + if opts.Password == "" { + if opts.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. + if opts.Username != "" { + return nil, ErrUsernameWithToken{} + } + if opts.UserID != "" { + return nil, ErrUserIDWithToken{} + } + if opts.DomainID != "" { + return nil, ErrDomainIDWithToken{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithToken{} + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: opts.TokenID, + } + } else { + // If no password or token ID are available, authentication can't continue. + return nil, ErrMissingPassword{} + } + } else { + // Password authentication. + req.Auth.Identity.Methods = []string{"password"} + + // At least one of Username and UserID must be specified. + if opts.Username == "" && opts.UserID == "" { + return nil, ErrUsernameOrUserID{} + } + + if opts.Username != "" { + // If Username is provided, UserID may not be provided. + if opts.UserID != "" { + return nil, ErrUsernameOrUserID{} + } + + // Either DomainID or DomainName must also be specified. 
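+			// The request body assembled below is roughly the Identity v3 +			// password method, a sketch like (values are placeholders): +			// +			//	{"auth": {"identity": {"methods": ["password"], +			//	    "password": {"user": {"name": "...", "password": "...", +			//	        "domain": {"id": "..."}}}}}}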
+ if opts.DomainID == "" && opts.DomainName == "" { + return nil, ErrDomainIDOrDomainName{} + } + + if opts.DomainID != "" { + if opts.DomainName != "" { + return nil, ErrDomainIDOrDomainName{} + } + + // Configure the request for Username and Password authentication with a DomainID. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{ID: &opts.DomainID}, + }, + } + } + + if opts.DomainName != "" { + // Configure the request for Username and Password authentication with a DomainName. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{Name: &opts.DomainName}, + }, + } + } + } + + if opts.UserID != "" { + // If UserID is specified, neither DomainID nor DomainName may be. + if opts.DomainID != "" { + return nil, ErrDomainIDWithUserID{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithUserID{} + } + + // Configure the request for UserID and Password authentication. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &opts.UserID, Password: opts.Password}, + } + } + } + + b, err := BuildRequestBody(req, "") + if err != nil { + return nil, err + } + + if len(scope) != 0 { + b["auth"].(map[string]interface{})["scope"] = scope + } + + return b, nil +} + +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + + var scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string + } + + if opts.TenantID != "" { + scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + scope.ProjectName = opts.TenantName + scope.DomainID = opts.DomainID + scope.DomainName = opts.DomainName + } + } + + if scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if scope.DomainID == "" && scope.DomainName == "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + if scope.ProjectID != "" { + return nil, ErrScopeProjectIDOrProjectName{} + } + + if scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &scope.ProjectName, + "domain": map[string]interface{}{"id": &scope.DomainID}, + }, + }, nil + } + + if scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &scope.ProjectName, + "domain": map[string]interface{}{"name": &scope.DomainName}, + }, + }, nil + } + } else if scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if scope.DomainID != "" { + return nil, ErrScopeProjectIDAlone{} + } + if scope.DomainName != "" { + return nil, ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &scope.ProjectID, + }, + }, nil + } else if scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
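+		// The scope section produced below is a sketch like: +		// +		//	"scope": {"domain": {"id": "..."}}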
+ if scope.DomainName != "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + + // DomainID + return map[string]interface{}{ + "domain": map[string]interface{}{ + "id": &scope.DomainID, + }, + }, nil + } else if scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &scope.DomainName, + }, + }, nil + } + + return nil, nil +} + +func (opts AuthOptions) CanReauth() bool { + return opts.AllowReauth +} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go new file mode 100644 index 0000000000..30067aa352 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -0,0 +1,93 @@ +/* +Package gophercloud provides a multi-vendor interface to OpenStack-compatible +clouds. The library has a three-level hierarchy: providers, services, and +resources. + +Authenticating with Providers + +Provider structs represent the cloud providers that offer and manage a +collection of services. You will generally want to create one Provider +client per OpenStack cloud. + +Use your OpenStack credentials to create a Provider client. The +IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in +information provided by the cloud operator. Additionally, the cloud may refer to +TenantID or TenantName as project_id and project_name. Credentials are +specified like so: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +You may also use the openstack.AuthOptionsFromEnv() helper function. This +function reads in standard environment variables frequently found in an +OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" +instead of "project". + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) + +Service Clients + +Service structs are specific to a provider and handle all of the logic and +operations for a particular OpenStack service. Examples of services include: +Compute, Object Storage, Block Storage. In order to define one, you need to +pass in the parent provider, like so: + + opts := gophercloud.EndpointOpts{Region: "RegionOne"} + + client, err := openstack.NewComputeV2(provider, opts) + +Resources + +Resource structs are the domain models that services make use of in order +to work with and represent the state of API resources: + + server, err := servers.Get(client, "{serverId}").Extract() + +Intermediate Result structs are returned for API operations, which allow +generic access to the HTTP headers, response body, and any errors associated +with the network transaction. To turn a result into a usable resource struct, +you must call the Extract method which is chained to the response, or an +Extract function from an applicable extension: + + result := servers.Get(client, "{serverId}") + + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) + +All requests that enumerate a collection return a Pager struct that is used to +iterate through the results one page at a time.
Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. Of particular note for end users +are the AuthOptions and EndpointOpts structs. +*/ +package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go new file mode 100644 index 0000000000..2fbc3c97f1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -0,0 +1,76 @@ +package gophercloud + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. + AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "openstack.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. + // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. 
+ // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. + Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. +func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go new file mode 100644 index 0000000000..2466932efe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -0,0 +1,419 @@ +package gophercloud + +import "fmt" + +// BaseError is an error type that all other error types embed. +type BaseError struct { + DefaultErrString string + Info string +} + +func (e BaseError) Error() string { + e.DefaultErrString = "An error occurred while executing a Gophercloud request." + return e.choseErrString() +} + +func (e BaseError) choseErrString() string { + if e.Info != "" { + return e.Info + } + return e.DefaultErrString +} + +// ErrMissingInput is the error when input is required in a particular +// situation but not provided by the user +type ErrMissingInput struct { + BaseError + Argument string +} + +func (e ErrMissingInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) + return e.choseErrString() +} + +// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. +type ErrInvalidInput struct { + ErrMissingInput + Value interface{} +} + +func (e ErrInvalidInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) + return e.choseErrString() +} + +// ErrUnexpectedResponseCode is returned by the Request method when a response code other than +// those listed in OkCodes is encountered. +type ErrUnexpectedResponseCode struct { + BaseError + URL string + Method string + Expected []int + Actual int + Body []byte +} + +func (e ErrUnexpectedResponseCode) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", + e.Expected, e.Method, e.URL, e.Actual, e.Body, + ) + return e.choseErrString() +} + +// ErrDefault400 is the default error type returned on a 400 HTTP response code. +type ErrDefault400 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault401 is the default error type returned on a 401 HTTP response code. +type ErrDefault401 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault403 is the default error type returned on a 403 HTTP response code. +type ErrDefault403 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault404 is the default error type returned on a 404 HTTP response code. +type ErrDefault404 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault405 is the default error type returned on a 405 HTTP response code. 
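+// Callers can distinguish these default error types with a type assertion; a +// minimal sketch (the servers call is illustrative): +// +//	_, err := servers.Get(client, "{serverId}").Extract() +//	if _, ok := err.(gophercloud.ErrDefault404); ok { +//		// the resource does not exist +//	}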
+type ErrDefault405 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault408 is the default error type returned on a 408 HTTP response code. +type ErrDefault408 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault429 is the default error type returned on a 429 HTTP response code. +type ErrDefault429 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault500 is the default error type returned on a 500 HTTP response code. +type ErrDefault500 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault503 is the default error type returned on a 503 HTTP response code. +type ErrDefault503 struct { + ErrUnexpectedResponseCode +} + +func (e ErrDefault400) Error() string { + return "Invalid request due to incorrect syntax or missing required parameters." +} +func (e ErrDefault401) Error() string { + return "Authentication failed" +} +func (e ErrDefault403) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Request forbidden: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} +func (e ErrDefault404) Error() string { + return "Resource not found" +} +func (e ErrDefault405) Error() string { + return "Method not allowed" +} +func (e ErrDefault408) Error() string { + return "The server timed out waiting for the request" +} +func (e ErrDefault429) Error() string { + return "Too many requests have been sent in a given amount of time. Pause" + + " requests, wait up to one minute, and try again." +} +func (e ErrDefault500) Error() string { + return "Internal Server Error" +} +func (e ErrDefault503) Error() string { + return "The service is currently unable to handle the request due to a temporary" + + " overloading or maintenance. This is a temporary condition. Try again later." +} + +// Err400er is the interface resource error types implement to override the error message +// from a 400 error. +type Err400er interface { + Error400(ErrUnexpectedResponseCode) error +} + +// Err401er is the interface resource error types implement to override the error message +// from a 401 error. +type Err401er interface { + Error401(ErrUnexpectedResponseCode) error +} + +// Err403er is the interface resource error types implement to override the error message +// from a 403 error. +type Err403er interface { + Error403(ErrUnexpectedResponseCode) error +} + +// Err404er is the interface resource error types implement to override the error message +// from a 404 error. +type Err404er interface { + Error404(ErrUnexpectedResponseCode) error +} + +// Err405er is the interface resource error types implement to override the error message +// from a 405 error. +type Err405er interface { + Error405(ErrUnexpectedResponseCode) error +} + +// Err408er is the interface resource error types implement to override the error message +// from a 408 error. +type Err408er interface { + Error408(ErrUnexpectedResponseCode) error +} + +// Err429er is the interface resource error types implement to override the error message +// from a 429 error. +type Err429er interface { + Error429(ErrUnexpectedResponseCode) error +} + +// Err500er is the interface resource error types implement to override the error message +// from a 500 error. +type Err500er interface { + Error500(ErrUnexpectedResponseCode) error +} + +// Err503er is the interface resource error types implement to override the error message +// from a 503 error. +type Err503er interface { + Error503(ErrUnexpectedResponseCode) error +} + +// ErrTimeOut is the error type returned when an operation times out.
+type ErrTimeOut struct { + BaseError +} + +func (e ErrTimeOut) Error() string { + e.DefaultErrString = "A time out occurred" + return e.choseErrString() +} + +// ErrUnableToReauthenticate is the error type returned when reauthentication fails. +type ErrUnableToReauthenticate struct { + BaseError + ErrOriginal error +} + +func (e ErrUnableToReauthenticate) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrErrorAfterReauthentication is the error type returned when reauthentication +// succeeds, but an error occurs afterward (usually an HTTP error). +type ErrErrorAfterReauthentication struct { + BaseError + ErrOriginal error +} + +func (e ErrErrorAfterReauthentication) Error() string { + e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrServiceNotFound is returned when no service in a service catalog matches +// the provided EndpointOpts. This is generally returned by provider service +// factory methods like "NewComputeV2()" and can mean that a service is not +// enabled for your account. +type ErrServiceNotFound struct { + BaseError +} + +func (e ErrServiceNotFound) Error() string { + e.DefaultErrString = "No suitable service could be found in the service catalog." + return e.choseErrString() +} + +// ErrEndpointNotFound is returned when no available endpoints match the +// provided EndpointOpts. This is also generally returned by provider service +// factory methods, and usually indicates that a region was specified +// incorrectly. +type ErrEndpointNotFound struct { + BaseError +} + +func (e ErrEndpointNotFound) Error() string { + e.DefaultErrString = "No suitable endpoint could be found in the service catalog." + return e.choseErrString() +} + +// ErrResourceNotFound is the error when trying to retrieve a resource's +// ID by name and the resource doesn't exist. +type ErrResourceNotFound struct { + BaseError + Name string + ResourceType string +} + +func (e ErrResourceNotFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrMultipleResourcesFound is the error when trying to retrieve a resource's +// ID by name and multiple resources have the user-provided name. +type ErrMultipleResourcesFound struct { + BaseError + Name string + Count int + ResourceType string +} + +func (e ErrMultipleResourcesFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrUnexpectedType is the error when an unexpected type is encountered. +type ErrUnexpectedType struct { + BaseError + Expected string + Actual string +} + +func (e ErrUnexpectedType) Error() string { + e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) + return e.choseErrString() +} + +func unacceptedAttributeErr(attribute string) string { + return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) +} + +func redundantWithTokenErr(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) +} + +func redundantWithUserID(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) +} + +// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
+type ErrAPIKeyProvided struct{ BaseError } + +func (e ErrAPIKeyProvided) Error() string { + return unacceptedAttributeErr("APIKey") +} + +// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. +type ErrTenantIDProvided struct{ BaseError } + +func (e ErrTenantIDProvided) Error() string { + return unacceptedAttributeErr("TenantID") +} + +// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. +type ErrTenantNameProvided struct{ BaseError } + +func (e ErrTenantNameProvided) Error() string { + return unacceptedAttributeErr("TenantName") +} + +// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. +type ErrUsernameWithToken struct{ BaseError } + +func (e ErrUsernameWithToken) Error() string { + return redundantWithTokenErr("Username") +} + +// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. +type ErrUserIDWithToken struct{ BaseError } + +func (e ErrUserIDWithToken) Error() string { + return redundantWithTokenErr("UserID") +} + +// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. +type ErrDomainIDWithToken struct{ BaseError } + +func (e ErrDomainIDWithToken) Error() string { + return redundantWithTokenErr("DomainID") +} + +// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead. +type ErrDomainNameWithToken struct{ BaseError } + +func (e ErrDomainNameWithToken) Error() string { + return redundantWithTokenErr("DomainName") +} + +// ErrUsernameOrUserID indicates that neither Username nor UserID is specified, or both are specified at once. +type ErrUsernameOrUserID struct{ BaseError } + +func (e ErrUsernameOrUserID) Error() string { + return "Exactly one of Username and UserID must be provided for password authentication" +} + +// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. +type ErrDomainIDWithUserID struct{ BaseError } + +func (e ErrDomainIDWithUserID) Error() string { + return redundantWithUserID("DomainID") +} + +// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. +type ErrDomainNameWithUserID struct{ BaseError } + +func (e ErrDomainNameWithUserID) Error() string { + return redundantWithUserID("DomainName") +} + +// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it to. +// It may also indicate that both a DomainID and a DomainName were provided at once. +type ErrDomainIDOrDomainName struct{ BaseError } + +func (e ErrDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName to authenticate by Username" +} + +// ErrMissingPassword indicates that no password was provided and no token is available. +type ErrMissingPassword struct{ BaseError } + +func (e ErrMissingPassword) Error() string { + return "You must provide a password to authenticate" +} + +// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. +type ErrScopeDomainIDOrDomainName struct{ BaseError } + +func (e ErrScopeDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" +} + +// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
+type ErrScopeProjectIDOrProjectName struct{ BaseError } + +func (e ErrScopeProjectIDOrProjectName) Error() string { + return "You must provide at most one of ProjectID or ProjectName in a Scope" +} + +// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. +type ErrScopeProjectIDAlone struct{ BaseError } + +func (e ErrScopeProjectIDAlone) Error() string { + return "ProjectID must be supplied alone in a Scope" +} + +// ErrScopeEmpty indicates that no credentials were provided in a Scope. +type ErrScopeEmpty struct{ BaseError } + +func (e ErrScopeEmpty) Error() string { + return "You must provide either a Project or Domain in a Scope" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go new file mode 100644 index 0000000000..b5482ba8c9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -0,0 +1,79 @@ +package openstack + +import ( + "os" + + "github.com/gophercloud/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +/* +AuthOptionsFromEnv fills out an identity.AuthOptions structure with the +settings found on the various OpenStack OS_* environment variables. + +The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. + +Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must be set, +or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and +OS_PROJECT_NAME are optional. + +OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive with OS_PROJECT_ID and +OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will +still be referred to as "tenant" in Gophercloud. + +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value.
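+	// A typical Identity v3 openrc therefore sets, for example (a sketch; +	// values are placeholders): +	// +	//	export OS_AUTH_URL=https://openstack.example.com:5000/v3 +	//	export OS_USERNAME=demo +	//	export OS_PASSWORD=secret +	//	export OS_PROJECT_NAME=demo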
+ if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + + if authURL == "" { + err := gophercloud.ErrMissingInput{Argument: "authURL"} + return nilOptions, err + } + + if username == "" && userID == "" { + err := gophercloud.ErrMissingInput{Argument: "username"} + return nilOptions, err + } + + if password == "" { + err := gophercloud.ErrMissingInput{Argument: "password"} + return nilOptions, err + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + } + + return ao, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go new file mode 100644 index 0000000000..85705d2126 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -0,0 +1,402 @@ +package openstack + +import ( + "fmt" + "net/url" + "reflect" + "regexp" + "strings" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +const ( + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. + // The version can be anything from v3 to v3.x. + v3 = "v3" +) + +/* +NewClient prepares an unauthenticated ProviderClient instance. +Most users will probably prefer using the AuthenticatedClient function +instead. + +This is useful if you wish to explicitly control the version of the identity +service that's used for authentication, for example. + +A basic example of using this would be: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.NewClient(ao.IdentityEndpoint) + client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) +*/ +func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + + u.RawQuery, u.Fragment = "", "" + + var base string + versionRe := regexp.MustCompile("v[0-9.]+/?") + if version := versionRe.FindString(u.Path); version != "" { + base = strings.Replace(u.String(), version, "", -1) + } else { + base = u.String() + } + + endpoint = gophercloud.NormalizeURL(endpoint) + base = gophercloud.NormalizeURL(base) + + p := new(gophercloud.ProviderClient) + p.IdentityBase = base + p.IdentityEndpoint = endpoint + p.UseTokenLock() + + return p, nil +} + +/* +AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint +specified by the options, acquires a token, and returns a Provider Client +instance that's ready to operate. + +If the full path to a versioned identity endpoint was specified (example: +http://example.com:5000/v3), that path will be used as the endpoint to query. + +If a versionless endpoint was specified (example: http://example.com:5000/), +the endpoint will be queried to determine which versions of the identity service +are available, and the most recent or most fully supported version will be chosen.
+ +Example: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { + client, err := NewClient(options.IdentityEndpoint) + if err != nil { + return nil, err + } + + err = Authenticate(client, options) + if err != nil { + return nil, err + } + return client, nil +} + +// Authenticate or re-authenticate against the most recent identity service +// supported at the provided endpoint. +func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v2: + return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) + case v3: + return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. +func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + return v2auth(client, "", options, eo) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + v2Client, err := NewIdentityV2(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + v2Opts := tokens2.AuthOptions{ + IdentityEndpoint: options.IdentityEndpoint, + Username: options.Username, + Password: options.Password, + TenantID: options.TenantID, + TenantName: options.TenantName, + AllowReauth: options.AllowReauth, + TokenID: options.TokenID, + } + + result := tokens2.Create(v2Client, v2Opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + tao := options + tao.AllowReauth = false + client.ReauthFunc = func() error { + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil + } + } + client.TokenID = token.ID + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + return v3auth(client, "", options, eo) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + // Override the generated service endpoint with the one returned by the version endpoint.
+ v3Client, err := NewIdentityV3(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + result := tokens3.Create(v3Client, opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + client.TokenID = token.ID + + if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } + client.ReauthFunc = func() error { + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. +func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v2.0/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. +func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v3/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint and requests will fail unless targeted at /v3. + if !strings.HasSuffix(endpoint, "v3/") { + endpoint = endpoint + "v3/" + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + url, err := client.EndpointLocator(eo) + if err != nil { + return sc, err + } + sc.ProviderClient = client + sc.Endpoint = url + sc.Type = clientType + return sc, nil +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "object-store") +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. 
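+//
+// A minimal usage sketch, not part of upstream gophercloud (`provider` is
+// assumed to come from openstack.AuthenticatedClient; the region name is
+// hypothetical):
+//
+//	compute, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+//		Region: "RegionOne",
+//	})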
+func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "compute") +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "network") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. +func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volume") +} + +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. +func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev2") +} + +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. +func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "sharev2") +} + +// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "cdn") +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 +// orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "orchestration") +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. +func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "database") +} + +// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS +// service. +func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "dns") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 +// image service. +func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "image") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. 
+func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	sc, err := initClientOpts(client, eo, "load-balancer")
+	sc.ResourceBase = sc.Endpoint + "v2.0/"
+	return sc, err
+}
+
+// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering
+// package.
+func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	return initClientOpts(client, eo, "clustering")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go
new file mode 100644
index 0000000000..cedf1f4d3a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go
@@ -0,0 +1,14 @@
+/*
+Package openstack contains resources for the individual OpenStack projects
+supported in Gophercloud. It also includes functions to authenticate to an
+OpenStack cloud and for provisioning various service-level clients.
+
+Example of Creating a Service Client
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
+package openstack
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
new file mode 100644
index 0000000000..12c8aebcf7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
@@ -0,0 +1,107 @@
+package openstack
+
+import (
+	"github.com/gophercloud/gophercloud"
+	tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
+	tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
+)
+
+/*
+V2EndpointURL discovers the endpoint URL for a specific service from a
+ServiceCatalog acquired during the v2 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
+func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
+	var endpoints = make([]tokens2.Endpoint, 0, 1)
+	for _, entry := range catalog.Entries {
+		if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
+			for _, endpoint := range entry.Endpoints {
+				if opts.Region == "" || endpoint.Region == opts.Region {
+					endpoints = append(endpoints, endpoint)
+				}
+			}
+		}
+	}
+
+	// Report an error if the options were ambiguous.
+	if len(endpoints) > 1 {
+		err := &ErrMultipleMatchingEndpointsV2{}
+		err.Endpoints = endpoints
+		return "", err
+	}
+
+	// Extract the appropriate URL from the matching Endpoint.
+ for _, endpoint := range endpoints { + switch opts.Availability { + case gophercloud.AvailabilityPublic: + return gophercloud.NormalizeURL(endpoint.PublicURL), nil + case gophercloud.AvailabilityInternal: + return gophercloud.NormalizeURL(endpoint.InternalURL), nil + case gophercloud.AvailabilityAdmin: + return gophercloud.NormalizeURL(endpoint.AdminURL), nil + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3EndpointURL discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go new file mode 100644 index 0000000000..df410b1c61 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go @@ -0,0 +1,71 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +// ErrEndpointNotFound is the error when no suitable endpoint can be found +// in the user's catalog +type ErrEndpointNotFound struct{ gophercloud.BaseError } + +func (e ErrEndpointNotFound) Error() string { + return "No suitable endpoint could be found in the service catalog." 
+} + +// ErrInvalidAvailabilityProvided is the error when an invalid endpoint +// availability is provided +type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } + +func (e ErrInvalidAvailabilityProvided) Error() string { + return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) +} + +// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint +// for the given options is found in the v2 catalog +type ErrMultipleMatchingEndpointsV2 struct { + gophercloud.BaseError + Endpoints []tokens2.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV2) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint +// for the given options is found in the v3 catalog +type ErrMultipleMatchingEndpointsV3 struct { + gophercloud.BaseError + Endpoints []tokens3.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV3) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not +// found +type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoAuthURL) Error() string { + return "Environment variable OS_AUTH_URL needs to be set." +} + +// ErrNoUsername is the error when the OS_USERNAME environment variable is not +// found +type ErrNoUsername struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoUsername) Error() string { + return "Environment variable OS_USERNAME needs to be set." +} + +// ErrNoPassword is the error when the OS_PASSWORD environment variable is not +// found +type ErrNoPassword struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoPassword) Error() string { + return "Environment variable OS_PASSWORD needs to be set." +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go new file mode 100644 index 0000000000..45623369e1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go @@ -0,0 +1,65 @@ +/* +Package tenants provides information and interaction with the +tenants API resource for the OpenStack Identity service. + +See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants +for more information. 
+
+Example to List Tenants
+
+	listOpts := tenants.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := tenants.List(identityClient, &listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allTenants, err := tenants.ExtractTenants(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, tenant := range allTenants {
+		fmt.Printf("%+v\n", tenant)
+	}
+
+Example to Create a Tenant
+
+	createOpts := tenants.CreateOpts{
+		Name:        "tenant_name",
+		Description: "this is a tenant",
+		Enabled:     gophercloud.Enabled,
+	}
+
+	tenant, err := tenants.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	updateOpts := tenants.UpdateOpts{
+		Description: "this is a new description",
+		Enabled:     gophercloud.Disabled,
+	}
+
+	tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	err := tenants.Delete(identityClient, tenantID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
new file mode 100644
index 0000000000..60f58c8ce3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
@@ -0,0 +1,116 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts filters the Tenants that are returned by the List call.
+type ListOpts struct {
+	// Marker is the ID of the last Tenant on the previous page.
+	Marker string `q:"marker"`
+
+	// Limit specifies the page size.
+	Limit int `q:"limit"`
+}
+
+// List enumerates the Tenants to which the current token has access.
+func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		q, err := gophercloud.BuildQueryString(opts)
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += q.String()
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOpts represents the options needed when creating a new tenant.
+type CreateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name" required:"true"`
+
+	// Description is the description of the tenant.
+	Description string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// CreateOptsBuilder enables extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToTenantCreateMap() (map[string]interface{}, error)
+}
+
+// ToTenantCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Create is the operation responsible for creating a new tenant.
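+//
+// An illustrative sketch only (identityClient is assumed to be an
+// admin-capable v2 identity ServiceClient):
+//
+//	createOpts := tenants.CreateOpts{Name: "new_tenant"}
+//	tenant, err := tenants.Create(identityClient, createOpts).Extract()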
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToTenantCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Get requests details on a single tenant by ID.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToTenantUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// tenant.
+type UpdateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name,omitempty"`
+
+	// Description is the description of the tenant.
+	Description string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// ToTenantUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Update is the operation responsible for updating existing tenants by their TenantID.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToTenantUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete is the operation responsible for permanently deleting a tenant.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
new file mode 100644
index 0000000000..bb6c2c6b08
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
@@ -0,0 +1,91 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Tenant is a grouping of users in the identity service.
+type Tenant struct {
+	// ID is a unique identifier for this tenant.
+	ID string `json:"id"`
+
+	// Name is a friendlier user-facing name for this tenant.
+	Name string `json:"name"`
+
+	// Description is a human-readable explanation of this Tenant's purpose.
+	Description string `json:"description"`
+
+	// Enabled indicates whether or not a tenant is active.
+	Enabled bool `json:"enabled"`
+}
+
+// TenantPage is a single page of Tenant results.
+type TenantPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of Tenants contains any results.
+func (r TenantPage) IsEmpty() (bool, error) {
+	tenants, err := ExtractTenants(r)
+	return len(tenants) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the tenants_links section of the result.
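+//
+// The paged response body is assumed to look roughly like this sketch:
+//
+//	{"tenants": [...], "tenants_links": [{"rel": "next", "href": "..."}]}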
+func (r TenantPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"tenants_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractTenants returns a slice of Tenants contained in a single page of
+// results.
+func ExtractTenants(r pagination.Page) ([]Tenant, error) {
+	var s struct {
+		Tenants []Tenant `json:"tenants"`
+	}
+	err := (r.(TenantPage)).ExtractInto(&s)
+	return s.Tenants, err
+}
+
+type tenantResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any tenantResults as a Tenant.
+func (r tenantResult) Extract() (*Tenant, error) {
+	var s struct {
+		Tenant *Tenant `json:"tenant"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Tenant, err
+}
+
+// GetResult is the response from a Get request. Call its Extract method to
+// interpret it as a Tenant.
+type GetResult struct {
+	tenantResult
+}
+
+// CreateResult is the response from a Create request. Call its Extract method
+// to interpret it as a Tenant.
+type CreateResult struct {
+	tenantResult
+}
+
+// DeleteResult is the response from a Delete request. Call its ExtractErr method
+// to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult is the response from an Update request. Call its Extract method
+// to interpret it as a Tenant.
+type UpdateResult struct {
+	tenantResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
new file mode 100644
index 0000000000..0f02669079
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
@@ -0,0 +1,23 @@
+package tenants
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func getURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func updateURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
new file mode 100644
index 0000000000..5375eea872
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
@@ -0,0 +1,46 @@
+/*
+Package tokens provides information and interaction with the token API
+resource for the OpenStack Identity service.
+
+For more information, see:
+http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+
+Example to Create an Unscoped Token from a Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "pass",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant ID and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "password",
+		TenantID: "fc394f2ab2df4114bde39905f800dc57",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant Name and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username:   "user",
+		Password:   "password",
+		TenantName: "tenantname",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
new file mode 100644
index 0000000000..ab32368cc6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
@@ -0,0 +1,103 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// PasswordCredentialsV2 represents the required options to authenticate
+// with a username and password.
+type PasswordCredentialsV2 struct {
+	Username string `json:"username" required:"true"`
+	Password string `json:"password" required:"true"`
+}
+
+// TokenCredentialsV2 represents the required options to authenticate
+// with a token.
+type TokenCredentialsV2 struct {
+	ID string `json:"id,omitempty" required:"true"`
+}
+
+// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the
+// AuthOptionsBuilder interface.
+type AuthOptionsV2 struct {
+	PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"`
+
+	// The TenantID and TenantName fields are optional for the Identity V2 API.
+	// Some providers allow you to specify a TenantName instead of the TenantID.
+	// Some require both. Your provider's authentication policies will determine
+	// how these fields influence authentication.
+	TenantID   string `json:"tenantId,omitempty"`
+	TenantName string `json:"tenantName,omitempty"`
+
+	// TokenCredentials allows users to authenticate (possibly as another user)
+	// with an authentication token ID.
+	TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"`
+}
+
+// AuthOptionsBuilder allows extensions to add additional parameters to the
+// token create request.
+type AuthOptionsBuilder interface {
+	// ToTokenV2CreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
+	ToTokenV2CreateMap() (map[string]interface{}, error)
+}
+
+// AuthOptions are the valid options for Openstack Identity v2 authentication.
+// For field descriptions, see gophercloud.AuthOptions.
+type AuthOptions struct {
+	IdentityEndpoint string `json:"-"`
+	Username         string `json:"username,omitempty"`
+	Password         string `json:"password,omitempty"`
+	TenantID         string `json:"tenantId,omitempty"`
+	TenantName       string `json:"tenantName,omitempty"`
+	AllowReauth      bool   `json:"-"`
+	TokenID          string
+}
+
+// ToTokenV2CreateMap builds a token request body from the given AuthOptions.
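+//
+// For password credentials the resulting map is expected to serialize to
+// something like the following sketch (field values elided):
+//
+//	{"auth": {"passwordCredentials": {"username": "...", "password": "..."}, "tenantName": "..."}}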
+func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + v2Opts := AuthOptionsV2{ + TenantID: opts.TenantID, + TenantName: opts.TenantName, + } + + if opts.Password != "" { + v2Opts.PasswordCredentials = &PasswordCredentialsV2{ + Username: opts.Username, + Password: opts.Password, + } + } else { + v2Opts.TokenCredentials = &TokenCredentialsV2{ + ID: opts.TokenID, + } + } + + b, err := gophercloud.BuildRequestBody(v2Opts, "auth") + if err != nil { + return nil, err + } + return b, nil +} + +// Create authenticates to the identity service and attempts to acquire a Token. +// Generally, rather than interact with this call directly, end users should +// call openstack.AuthenticatedClient(), which abstracts all of the gory details +// about navigating service catalogs and such. +func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { + b, err := auth.ToTokenV2CreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + return +} + +// Get validates and retrieves information for user's token. +func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { + _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go new file mode 100644 index 0000000000..b11326772b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -0,0 +1,159 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" +) + +// Token provides only the most basic information related to an authentication +// token. +type Token struct { + // ID provides the primary means of identifying a user to the OpenStack API. + // OpenStack defines this field as an opaque value, so do not depend on its + // content. It is safe, however, to compare for equality. + ID string + + // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the + // authentication token becomes invalid. After this point in time, future + // API requests made using this authentication token will respond with + // errors. Either the caller will need to reauthenticate manually, or more + // preferably, the caller should exploit automatic re-authentication. + // See the AuthOptions structure for more details. + ExpiresAt time.Time + + // Tenant provides information about the tenant to which this token grants + // access. + Tenant tenants.Tenant +} + +// Role is a role for a user. +type Role struct { + Name string `json:"name"` +} + +// User is an OpenStack user. +type User struct { + ID string `json:"id"` + Name string `json:"name"` + UserName string `json:"username"` + Roles []Role `json:"roles"` +} + +// Endpoint represents a single API endpoint offered by a service. +// It provides the public and internal URLs, if supported, along with a region +// specifier, again if provided. +// +// The significance of the Region field will depend upon your provider. 
+// +// In addition, the interface offered by the service will have version +// information associated with it through the VersionId, VersionInfo, and +// VersionList fields, if provided or supported. +// +// In all cases, fields which aren't supported by the provider and service +// combined will assume a zero-value (""). +type Endpoint struct { + TenantID string `json:"tenantId"` + PublicURL string `json:"publicURL"` + InternalURL string `json:"internalURL"` + AdminURL string `json:"adminURL"` + Region string `json:"region"` + VersionID string `json:"versionId"` + VersionInfo string `json:"versionInfo"` + VersionList string `json:"versionList"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V2 service +// catalog listing. +// +// Each class of service, such as cloud DNS or block storage services, will have +// a single CatalogEntry representing it. +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may assign + // their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry +} + +// CreateResult is the response from a Create request. Use ExtractToken() to +// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a +// service catalog. +type CreateResult struct { + gophercloud.Result +} + +// GetResult is the deferred response from a Get call, which is the same with a +// Created token. Use ExtractUser() to interpret it as a User. +type GetResult struct { + CreateResult +} + +// ExtractToken returns the just-created Token from a CreateResult. +func (r CreateResult) ExtractToken() (*Token, error) { + var s struct { + Access struct { + Token struct { + Expires string `json:"expires"` + ID string `json:"id"` + Tenant tenants.Tenant `json:"tenant"` + } `json:"token"` + } `json:"access"` + } + + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) + if err != nil { + return nil, err + } + + return &Token{ + ID: s.Access.Token.ID, + ExpiresAt: expiresTs, + Tenant: s.Access.Token.Tenant, + }, nil +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s struct { + Access struct { + Entries []CatalogEntry `json:"serviceCatalog"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &ServiceCatalog{Entries: s.Access.Entries}, err +} + +// ExtractUser returns the User from a GetResult. 
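+//
+// Illustrative use (sketch; identityClient and tokenID are assumed to exist):
+//
+//	user, err := tokens.Get(identityClient, tokenID).ExtractUser()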
+func (r GetResult) ExtractUser() (*User, error) { + var s struct { + Access struct { + User User `json:"user"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &s.Access.User, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go new file mode 100644 index 0000000000..ee0a28f200 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go @@ -0,0 +1,13 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// CreateURL generates the URL used to create new Tokens. +func CreateURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tokens") +} + +// GetURL generates the URL used to Validate Tokens. +func GetURL(client *gophercloud.ServiceClient, token string) string { + return client.ServiceURL("tokens", token) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 0000000000..966e128f12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,108 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 + +Example to Create a Token From a Username and Password + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Username, Password, and Domain + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainID: "default", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + + authOptions = tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainName: "default", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Token + + authOptions := tokens.AuthOptions{ + TokenID: "token_id", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project ID Scope + + scope := tokens.Scope{ + ProjectID: "0fe36e73809d46aeae6705c39077b1b3", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Domain ID Scope + + scope := tokens.Scope{ + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project Name Scope + + scope := tokens.Scope{ + ProjectName: "project_name", + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + 
if err != nil {
+		panic(err)
+	}
+
+*/
+package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
new file mode 100644
index 0000000000..ca35851e4a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
@@ -0,0 +1,210 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// Scope allows a created token to be limited to a specific domain or project.
+type Scope struct {
+	ProjectID   string
+	ProjectName string
+	DomainID    string
+	DomainName  string
+}
+
+// AuthOptionsBuilder provides the ability for extensions to add additional
+// parameters to AuthOptions. Extensions must satisfy all required methods.
+type AuthOptionsBuilder interface {
+	// ToTokenV3CreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
+	ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error)
+	ToTokenV3ScopeMap() (map[string]interface{}, error)
+	CanReauth() bool
+}
+
+// AuthOptions represents options for authenticating a user.
+type AuthOptions struct {
+	// IdentityEndpoint specifies the HTTP endpoint that is required to work with
+	// the Identity API of the appropriate version. While it's ultimately needed
+	// by all of the identity services, it will often be populated by a
+	// provider-level function.
+	IdentityEndpoint string `json:"-"`
+
+	// Username is required if using Identity V2 API. Consult with your provider's
+	// control panel to discover your account's username. In Identity V3, either
+	// UserID or a combination of Username and DomainID or DomainName are needed.
+	Username string `json:"username,omitempty"`
+	UserID   string `json:"id,omitempty"`
+
+	Password string `json:"password,omitempty"`
+
+	// At most one of DomainID and DomainName must be provided if using Username
+	// with Identity V3. Otherwise, either is optional.
+	DomainID   string `json:"-"`
+	DomainName string `json:"name,omitempty"`
+
+	// AllowReauth should be set to true if you grant permission for Gophercloud
+	// to cache your credentials in memory, and to allow Gophercloud to attempt
+	// to re-authenticate automatically if/when your token expires. If you set
+	// it to false, it will not cache these settings, but re-authentication will
+	// not be possible. This setting defaults to false.
+	AllowReauth bool `json:"-"`
+
+	// TokenID allows users to authenticate (possibly as another user) with an
+	// authentication token ID.
+	TokenID string `json:"-"`
+
+	Scope Scope `json:"-"`
+}
+
+// ToTokenV3CreateMap builds a request body from AuthOptions.
+func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
+	gophercloudAuthOpts := gophercloud.AuthOptions{
+		Username:    opts.Username,
+		UserID:      opts.UserID,
+		Password:    opts.Password,
+		DomainID:    opts.DomainID,
+		DomainName:  opts.DomainName,
+		AllowReauth: opts.AllowReauth,
+		TokenID:     opts.TokenID,
+	}
+
+	return gophercloudAuthOpts.ToTokenV3CreateMap(scope)
+}
+
+// ToTokenV3ScopeMap builds a scope request body from AuthOptions.
+func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
+	if opts.Scope.ProjectName != "" {
+		// ProjectName provided: either DomainID or DomainName must also be supplied.
+		// ProjectID may not be supplied.
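+		// (Sketch of the scope map built by this branch, serialized:
+		// {"project": {"name": "...", "domain": {"id": "..."}}} or
+		// {"project": {"name": "...", "domain": {"name": "..."}}}.)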
+ if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { + return nil, gophercloud.ErrScopeDomainIDOrDomainName{} + } + if opts.Scope.ProjectID != "" { + return nil, gophercloud.ErrScopeProjectIDOrProjectName{} + } + + if opts.Scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, + }, + }, nil + } + + if opts.Scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, + }, + }, nil + } + } else if opts.Scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if opts.Scope.DomainID != "" { + return nil, gophercloud.ErrScopeProjectIDAlone{} + } + if opts.Scope.DomainName != "" { + return nil, gophercloud.ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &opts.Scope.ProjectID, + }, + }, nil + } else if opts.Scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. + if opts.Scope.DomainName != "" { + return nil, gophercloud.ErrScopeDomainIDOrDomainName{} + } + + // DomainID + return map[string]interface{}{ + "domain": map[string]interface{}{ + "id": &opts.Scope.DomainID, + }, + }, nil + } else if opts.Scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &opts.Scope.DomainName, + }, + }, nil + } + + return nil, nil +} + +func (opts *AuthOptions) CanReauth() bool { + return opts.AllowReauth +} + +func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { + return map[string]string{ + "X-Subject-Token": subjectToken, + } +} + +// Create authenticates and either generates a new token, or changes the Scope +// of an existing token. +func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { + scope, err := opts.ToTokenV3ScopeMap() + if err != nil { + r.Err = err + return + } + + b, err := opts.ToTokenV3CreateMap(scope) + if err != nil { + r.Err = err + return + } + + resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// Get validates and retrieves information about another token. +func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { + resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 203}, + }) + if resp != nil { + r.Err = err + r.Header = resp.Header + } + return +} + +// Validate determines if a specified token is valid or not. +func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { + resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 204, 404}, + }) + if err != nil { + return false, err + } + + return resp.StatusCode == 200 || resp.StatusCode == 204, nil +} + +// Revoke immediately makes specified token invalid. 
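+//
+// A sketch of use (identityClient and tokenID assumed):
+//
+//	res := tokens.Revoke(identityClient, tokenID)
+//	if res.Err != nil {
+//		// handle the failed revocation
+//	}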
+func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { + _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go new file mode 100644 index 0000000000..ebdca58f65 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go @@ -0,0 +1,171 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" +) + +// Endpoint represents a single API endpoint offered by a service. +// It matches either a public, internal or admin URL. +// If supported, it contains a region specifier, again if provided. +// The significance of the Region field will depend upon your provider. +type Endpoint struct { + ID string `json:"id"` + Region string `json:"region"` + RegionID string `json:"region_id"` + Interface string `json:"interface"` + URL string `json:"url"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V3 service +// catalog listing. Each class of service, such as cloud DNS or block storage +// services, could have multiple CatalogEntry representing it (one by interface +// type, e.g public, admin or internal). +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Service ID + ID string `json:"id"` + + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may + // assign their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry `json:"catalog"` +} + +// Domain provides information about the domain to which this token grants +// access. +type Domain struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// User represents a user resource that exists in the Identity Service. +type User struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// Role provides information about roles to which User is authorized. +type Role struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// Project provides information about project to which User is authorized. +type Project struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// commonResult is the response from a request. A commonResult has various +// methods which can be used to extract different details about the result. +type commonResult struct { + gophercloud.Result +} + +// Extract is a shortcut for ExtractToken. +// This function is deprecated and still present for backward compatibility. +func (r commonResult) Extract() (*Token, error) { + return r.ExtractToken() +} + +// ExtractToken interprets a commonResult as a Token. 
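+//
+// Typical use on a create result (sketch; authOptions is assumed to be a
+// tokens.AuthOptions value):
+//
+//	token, err := tokens.Create(identityClient, &authOptions).ExtractToken()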
+func (r commonResult) ExtractToken() (*Token, error) { + var s Token + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + // Parse the token itself from the stored headers. + s.ID = r.Header.Get("X-Subject-Token") + + return &s, err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s ServiceCatalog + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractUser returns the User that is the owner of the Token. +func (r commonResult) ExtractUser() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ExtractRoles returns Roles to which User is authorized. +func (r commonResult) ExtractRoles() ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := r.ExtractInto(&s) + return s.Roles, err +} + +// ExtractProject returns Project to which User is authorized. +func (r commonResult) ExtractProject() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} + +// CreateResult is the response from a Create request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type CreateResult struct { + commonResult +} + +// GetResult is the response from a Get request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type GetResult struct { + commonResult +} + +// RevokeResult is response from a Revoke request. +type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services +// in an OpenStack provider. Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string `json:"id"` + + // ExpiresAt is the timestamp at which this token will no longer be accepted. + ExpiresAt time.Time `json:"expires_at"` +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.ExtractIntoStructPtr(v, "token") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go new file mode 100644 index 0000000000..2f864a31c8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go @@ -0,0 +1,7 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +func tokenURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("auth", "tokens") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 0000000000..27da19f91a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. +type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. 
+// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. +func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. + for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go new file mode 100644 index 0000000000..757295c423 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. 
+func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204, 300}, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go new file mode 100644 index 0000000000..3656fb7f8f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go @@ -0,0 +1,92 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. +type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. +func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return "", err + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + } else { + if value == nil { + // Actual null element. 
+				return "", nil
+			}
+
+			url, ok := value.(string)
+			if !ok {
+				err := gophercloud.ErrUnexpectedType{}
+				err.Expected = "string"
+				err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
+				return "", err
+			}
+
+			return url, nil
+		}
+	}
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (current LinkedPageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the linked page's body. This method is needed to satisfy the
+// Page interface.
+func (current LinkedPageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
new file mode 100644
index 0000000000..52e53bae85
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
@@ -0,0 +1,58 @@
+package pagination
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager.
+// For convenience, embed the MarkerPageBase struct.
+type MarkerPage interface {
+	Page
+
+	// LastMarker returns the last "marker" value on this page.
+	LastMarker() (string, error)
+}
+
+// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters.
+type MarkerPageBase struct {
+	PageResult
+
+	// Owner is a reference to the embedding struct.
+	Owner MarkerPage
+}
+
+// NextPageURL generates the URL for the page of results after this one.
+func (current MarkerPageBase) NextPageURL() (string, error) {
+	currentURL := current.URL
+
+	mark, err := current.Owner.LastMarker()
+	if err != nil {
+		return "", err
+	}
+
+	q := currentURL.Query()
+	q.Set("marker", mark)
+	currentURL.RawQuery = q.Encode()
+
+	return currentURL.String(), nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (current MarkerPageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the marker page's body. This method is needed to satisfy the
+// Page interface.
+func (current MarkerPageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
new file mode 100644
index 0000000000..7c65926b72
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
@@ -0,0 +1,237 @@
+package pagination
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+var (
+	// ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist.
+	ErrPageNotAvailable = errors.New("The requested page does not exist.")
+)
+
+// Page must be satisfied by the result type of any resource collection.
+// It allows clients to interact with the resource uniformly, regardless of whether or how it's paginated.
+// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs instead.
+// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
+// will need to implement.
+type Page interface {
+	// NextPageURL generates the URL for the page of data that follows this collection.
+	// Return "" if no such page exists.
+	NextPageURL() (string, error)
+
+	// IsEmpty returns true if this Page has no items in it.
+	IsEmpty() (bool, error)
+
+	// GetBody returns the Page Body. This is used in the `AllPages` method.
+	GetBody() interface{}
+}
+
+// Pager knows how to advance through a specific resource collection, one page at a time.
+type Pager struct {
+	client *gophercloud.ServiceClient
+
+	initialURL string
+
+	createPage func(r PageResult) Page
+
+	Err error
+
+	// Headers supplies additional HTTP headers to populate on each paged request.
+	Headers map[string]string
+}
+
+// NewPager constructs a manually-configured pager.
+// Supply the URL for the first page and a function that constructs a Page from a PageResult.
+func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     client,
+		initialURL: initialURL,
+		createPage: createPage,
+	}
+}
+
+// WithPageCreator returns a new Pager that substitutes a different page creation function. This is
+// useful for overriding List functions in delegation.
+func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     p.client,
+		initialURL: p.initialURL,
+		createPage: createPage,
+	}
+}
+
+func (p Pager) fetchNextPage(url string) (Page, error) {
+	resp, err := Request(p.client, p.Headers, url)
+	if err != nil {
+		return nil, err
+	}
+
+	remembered, err := PageResultFrom(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.createPage(remembered), nil
+}
+
+// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function.
+// Return "false" from the handler to prematurely stop iterating.
+func (p Pager) EachPage(handler func(Page) (bool, error)) error {
+	if p.Err != nil {
+		return p.Err
+	}
+	currentURL := p.initialURL
+	for {
+		currentPage, err := p.fetchNextPage(currentURL)
+		if err != nil {
+			return err
+		}
+
+		empty, err := currentPage.IsEmpty()
+		if err != nil {
+			return err
+		}
+		if empty {
+			return nil
+		}
+
+		ok, err := handler(currentPage)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return nil
+		}
+
+		currentURL, err = currentPage.NextPageURL()
+		if err != nil {
+			return err
+		}
+		if currentURL == "" {
+			return nil
+		}
+	}
+}
+
+// AllPages returns all the pages from a `List` operation in a single page,
+// allowing the user to retrieve all the pages at once.
+func (p Pager) AllPages() (Page, error) {
+	// pagesSlice holds all the pages until they get converted into a Page Body.
+	var pagesSlice []interface{}
+	// body will contain the final concatenated Page body.
+	var body reflect.Value
+
+	// Grab a test page to ascertain the page body type.
+	testPage, err := p.fetchNextPage(p.initialURL)
+	if err != nil {
+		return nil, err
+	}
+	// Store the page type so we can use reflection to create a new mega-page of
+	// that type.
+	pageType := reflect.TypeOf(testPage)
+
+	// if it's a single page, just return the testPage (first page)
+	if _, found := pageType.FieldByName("SinglePageBase"); found {
+		return testPage, nil
+	}
+
+	// Switch on the page body type. Recognized types are `map[string]interface{}`,
+	// `[]byte`, and `[]interface{}`.
+	switch pb := testPage.GetBody().(type) {
+	case map[string]interface{}:
+		// key is the map key for the page body if the body type is `map[string]interface{}`.
+		var key string
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().(map[string]interface{})
+			for k, v := range b {
+				// If it's a linked page, we don't want the `links`, we want the other one.
+				if !strings.HasSuffix(k, "links") {
+					// check the field's type. we only want []interface{} (which is really []map[string]interface{})
+					switch vt := v.(type) {
+					case []interface{}:
+						key = k
+						pagesSlice = append(pagesSlice, vt...)
+					}
+				}
+			}
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `map[string]interface{}`
+		body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice)))
+		body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice))
+	case []byte:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]byte)
+			pagesSlice = append(pagesSlice, b)
+			// separate pages with a newline (byte 10)
+			pagesSlice = append(pagesSlice, []byte{10})
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		if len(pagesSlice) > 0 {
+			// Remove the trailing newline.
+			pagesSlice = pagesSlice[:len(pagesSlice)-1]
+		}
+		var b []byte
+		// Combine the slice of slices into a single slice.
+		for _, slice := range pagesSlice {
+			b = append(b, slice.([]byte)...)
+		}
+		// Set body to value of type `[]byte`.
+		body = reflect.New(reflect.TypeOf(b)).Elem()
+		body.SetBytes(b)
+	case []interface{}:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]interface{})
+			pagesSlice = append(pagesSlice, b...)
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `[]interface{}`
+		body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice))
+		for i, s := range pagesSlice {
+			body.Index(i).Set(reflect.ValueOf(s))
+		}
+	default:
+		err := gophercloud.ErrUnexpectedType{}
+		err.Expected = "map[string]interface{}/[]byte/[]interface{}"
+		err.Actual = fmt.Sprintf("%T", pb)
+		return nil, err
+	}
+
+	// Each `Extract*` function is expecting a specific type of page coming back,
+	// otherwise the type assertion in those functions will fail. pageType is needed
+	// to create a type in this method that has the same type that the `Extract*`
+	// function is expecting and set the Body of that object to the concatenated
+	// pages.
+	page := reflect.New(pageType)
+	// Set the page body to be the concatenated pages.
+	page.Elem().FieldByName("Body").Set(body)
+	// Set any additional headers that were passed along. The `objectstorage` package,
+	// for example, passes a Content-Type header.
+	h := make(http.Header)
+	for k, v := range p.Headers {
+		h.Add(k, v)
+	}
+	page.Elem().FieldByName("Header").Set(reflect.ValueOf(h))
+	// Type assert the page to a Page interface so that the type assertion in the
+	// `Extract*` methods will work.
+	return page.Elem().Interface().(Page), err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
new file mode 100644
index 0000000000..912daea364
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
@@ -0,0 +1,4 @@
+/*
+Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
+*/
+package pagination
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
new file mode 100644
index 0000000000..4251d6491e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
@@ -0,0 +1,33 @@
+package pagination
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
+type SinglePageBase PageResult
+
+// NextPageURL always returns "" to indicate that there are no more pages to return.
+func (current SinglePageBase) NextPageURL() (string, error) {
+	return "", nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (current SinglePageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the single page's body. This method is needed to satisfy the
+// Page interface.
+func (current SinglePageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go
new file mode 100644
index 0000000000..28ad906856
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/params.go
@@ -0,0 +1,477 @@
+package gophercloud
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+/*
+BuildRequestBody builds a map[string]interface{} from the given `struct`. If
+parent is not an empty string, the final map[string]interface{} returned will
+encapsulate the built one. For example:
+
+	disk := 1
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       &disk,
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	body, err := gophercloud.BuildRequestBody(createOpts, "flavor")
+
+The above example can be run as-is, however it is recommended to look at how
+BuildRequestBody is used within Gophercloud to more fully understand how it
+fits within the request process as a whole rather than use it directly as shown
+above.
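+
+As a sketch of the wrapping behavior with a hypothetical options struct (the
+type below is illustrative, not part of Gophercloud):
+
+	type CreateOpts struct {
+		Name string `json:"name"`
+	}
+
+	body, _ := gophercloud.BuildRequestBody(CreateOpts{Name: "web"}, "server")
+	// body is map[string]interface{}{"server": map[string]interface{}{"name": "web"}}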
+*/ +func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]interface{}) + if optsValue.Kind() == reflect.Struct { + //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + + if f.Name != strings.Title(f.Name) { + //fmt.Printf("Skipping field: %s...\n", f.Name) + continue + } + + //fmt.Printf("Starting on field: %s...\n", f.Name) + + zero := isZero(v) + //fmt.Printf("v is zero?: %v\n", zero) + + // if the field has a required tag that's set to "true" + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) + // if the field's value is zero, return a missing-argument error + if zero { + // if the field has a 'required' tag, it can't have a zero-value + err := ErrMissingInput{} + err.Argument = f.Name + return nil, err + } + } + + if xorTag := f.Tag.Get("xor"); xorTag != "" { + //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) + xorField := optsValue.FieldByName(xorTag) + var xorFieldIsZero bool + if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { + xorFieldIsZero = true + } else { + if xorField.Kind() == reflect.Ptr { + xorField = xorField.Elem() + } + xorFieldIsZero = isZero(xorField) + } + if !(zero != xorFieldIsZero) { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) + err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) + return nil, err + } + } + + if orTag := f.Tag.Get("or"); orTag != "" { + //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) + //fmt.Printf("field is zero?: %v\n", zero) + if zero { + orField := optsValue.FieldByName(orTag) + var orFieldIsZero bool + if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { + orFieldIsZero = true + } else { + if orField.Kind() == reflect.Ptr { + orField = orField.Elem() + } + orFieldIsZero = isZero(orField) + } + if orFieldIsZero { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) + err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) + return nil, err + } + } + } + + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { + if zero { + //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) + if jsonTag != "" { + jsonTagPieces := strings.Split(jsonTag, ",") + if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { + if v.CanSet() { + if !v.IsNil() { + if v.Kind() == reflect.Ptr { + v.Set(reflect.Zero(v.Type())) + } + } + //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) + } + } + } + continue + } + + //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) + _, err := BuildRequestBody(v.Interface(), f.Name) + if err != nil { + return nil, err + } + } + } + + //fmt.Printf("opts: %+v \n", opts) + + b, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + //fmt.Printf("string(b): %s\n", 
string(b)) + + err = json.Unmarshal(b, &optsMap) + if err != nil { + return nil, err + } + + //fmt.Printf("optsMap: %+v\n", optsMap) + + if parent != "" { + optsMap = map[string]interface{}{parent: optsMap} + } + //fmt.Printf("optsMap after parent added: %+v\n", optsMap) + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IPVersion is a type for the possible IP address versions. Valid instances +// are IPv4 and IPv6 +type IPVersion int + +const ( + // IPv4 is used for IP version 4 addresses + IPv4 IPVersion = 4 + // IPv6 is used for IP version 6 addresses + IPv6 IPVersion = 6 +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. +*/ +func MaybeInt(original int) *int { + if original != 0 { + return &original + } + return nil +} + +/* +func isUnderlyingStructZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr: + return isUnderlyingStructZero(v.Elem()) + default: + return isZero(v) + } +} +*/ + +var t time.Time + +func isZero(v reflect.Value) bool { + //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + return true + } + return false + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + if v.Type() == reflect.TypeOf(t) { + if v.Interface().(time.Time).IsZero() { + return true + } + return false + } + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + //fmt.Printf("zero type for value: %+v\n\n\n", z) + return v.Interface() == z.Interface() +} + +/* +BuildQueryString is an internal function to be used by request methods in +individual resource packages. + +It accepts a tagged structure and expands it into a URL struct. Field names are +converted into query parameters based on a "q" tag. 
For example:

+	type Something struct {
+		Bar string `q:"x_bar"`
+		Baz int    `q:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 2,
+	}
+
+will be converted into "?x_bar=AAA&lorem_ipsum=2".
+
+The struct's fields may be strings, integers, booleans, slices of strings or
+integers, or string-to-string maps. Fields left at their type's zero value will
+be omitted from the query.
+*/
+func BuildQueryString(opts interface{}) (*url.URL, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	params := url.Values{}
+
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			qTag := f.Tag.Get("q")
+
+			// if the field has a 'q' tag, it goes in the query string
+			if qTag != "" {
+				tags := strings.Split(qTag, ",")
+
+				// if the field is set, add it to the slice of query pieces
+				if !isZero(v) {
+				loop:
+					switch v.Kind() {
+					case reflect.Ptr:
+						v = v.Elem()
+						goto loop
+					case reflect.String:
+						params.Add(tags[0], v.String())
+					case reflect.Int:
+						params.Add(tags[0], strconv.FormatInt(v.Int(), 10))
+					case reflect.Bool:
+						params.Add(tags[0], strconv.FormatBool(v.Bool()))
+					case reflect.Slice:
+						switch v.Type().Elem() {
+						case reflect.TypeOf(0):
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10))
+							}
+						default:
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], v.Index(i).String())
+							}
+						}
+					case reflect.Map:
+						if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String {
+							var s []string
+							for _, k := range v.MapKeys() {
+								value := v.MapIndex(k).String()
+								s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value))
+							}
+							params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", ")))
+						}
+					}
+				} else {
+					// Otherwise, the field is not set.
+					if len(tags) == 2 && tags[1] == "required" {
+						// And the field is required. Return an error.
+						return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name)
+					}
+				}
+			}
+		}
+
+		return &url.URL{RawQuery: params.Encode()}, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return nil, fmt.Errorf("Options type is not a struct.")
+}
+
+/*
+BuildHeaders is an internal function to be used by request methods in
+individual resource packages.
+
+It accepts an arbitrary tagged structure and produces a string map that's
+suitable for use as the HTTP headers of an outgoing request. Field names are
+mapped to header names based on "h" tags. For example:
+
+	type Something struct {
+		Bar string `h:"x_bar"`
+		Baz int    `h:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 2,
+	}
+
+will be converted into:
+
+	map[string]string{
+		"x_bar":       "AAA",
+		"lorem_ipsum": "2",
+	}
+
+Untagged fields and fields left at their zero values are skipped. Integers,
+booleans and string values are supported.
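+
+A field may also be marked required via a second tag element (the struct below
+is illustrative only):
+
+	type Opts struct {
+		TransferID string `h:"x_transfer_id,required"`
+	}
+
+BuildHeaders returns an error if such a field is left at its zero value.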
+*/
+func BuildHeaders(opts interface{}) (map[string]string, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	optsMap := make(map[string]string)
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			hTag := f.Tag.Get("h")
+
+			// if the field has an 'h' tag, it goes in the header
+			if hTag != "" {
+				tags := strings.Split(hTag, ",")
+
+				// if the field is set, add it to the slice of query pieces
+				if !isZero(v) {
+					switch v.Kind() {
+					case reflect.String:
+						optsMap[tags[0]] = v.String()
+					case reflect.Int:
+						optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10)
+					case reflect.Bool:
+						optsMap[tags[0]] = strconv.FormatBool(v.Bool())
+					}
+				} else {
+					// Otherwise, the field is not set.
+					if len(tags) == 2 && tags[1] == "required" {
+						// And the field is required. Return an error.
+						return optsMap, fmt.Errorf("Required header not set.")
+					}
+				}
+			}
+
+		}
+		return optsMap, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return optsMap, fmt.Errorf("Options type is not a struct.")
+}
+
+// IDSliceToQueryString takes a slice of elements and converts them into a query
+// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the
+// result would be `?foo=20&foo=40&foo=60`.
+func IDSliceToQueryString(name string, ids []int) string {
+	str := ""
+	for k, v := range ids {
+		if k == 0 {
+			str += "?"
+		} else {
+			str += "&"
+		}
+		str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v))
+	}
+	return str
+}
+
+// IntWithinRange returns TRUE if an integer falls within a defined range
+// (exclusive of min and max), and FALSE if not.
+func IntWithinRange(val, min, max int) bool {
+	return val > min && val < max
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go
new file mode 100644
index 0000000000..17e4512743
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go
@@ -0,0 +1,387 @@
+package gophercloud
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+// DefaultUserAgent is the default User-Agent string set in the request header.
+const DefaultUserAgent = "gophercloud/2.0.0"
+
+// UserAgent represents a User-Agent header.
+type UserAgent struct {
+	// prepend is the slice of User-Agent strings to prepend to DefaultUserAgent.
+	// All the strings to prepend are accumulated and prepended in the Join method.
+	prepend []string
+}
+
+// Prepend prepends a user-defined string to the default User-Agent string. Users
+// may pass in one or more strings to prepend.
+func (ua *UserAgent) Prepend(s ...string) {
+	ua.prepend = append(s, ua.prepend...)
+}
+
+// Join concatenates all the user-defined User-Agent strings with the default
+// Gophercloud User-Agent string.
+func (ua *UserAgent) Join() string {
+	uaSlice := append(ua.prepend, DefaultUserAgent)
+	return strings.Join(uaSlice, " ")
+}
+
+// ProviderClient stores details that are required to interact with any
+// services within a specific provider's API.
+//
+// Generally, you acquire a ProviderClient by calling the NewClient method in
+// the appropriate provider's child package, providing whatever authentication
+// credentials are required.
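+//
+// A minimal hand-rolled sketch (the endpoint is illustrative; real code would
+// normally use a provider's New* helper instead):
+//
+//	client := &gophercloud.ProviderClient{
+//		IdentityBase: "https://identity.example.test/",
+//	}
+//	client.UseTokenLock()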
+type ProviderClient struct {
+	// IdentityBase is the base URL used for a particular provider's identity
+	// service - it will be used when issuing authentication requests. It
+	// should point to the root resource of the identity service, not a specific
+	// identity version.
+	IdentityBase string
+
+	// IdentityEndpoint is the identity endpoint. This may be a specific version
+	// of the identity service. If this is the case, this endpoint is used rather
+	// than querying versions first.
+	IdentityEndpoint string
+
+	// TokenID is the ID of the most recently issued valid token.
+	// NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application.
+	// To safely read or write this value, call `Token` or `SetToken`, respectively.
+	TokenID string
+
+	// EndpointLocator describes how this provider discovers the endpoints for
+	// its constituent services.
+	EndpointLocator EndpointLocator
+
+	// HTTPClient allows users to interject arbitrary http, https, or other transit behaviors.
+	HTTPClient http.Client
+
+	// UserAgent represents the User-Agent header in the HTTP request.
+	UserAgent UserAgent
+
+	// ReauthFunc is the function used to re-authenticate the user if the request
+	// fails with a 401 HTTP response code. This is needed because there may be multiple
+	// authentication functions for different Identity service versions.
+	ReauthFunc func() error
+
+	mut *sync.RWMutex
+
+	reauthmut *reauthlock
+}
+
+type reauthlock struct {
+	sync.RWMutex
+	reauthing bool
+}
+
+// AuthenticatedHeaders returns a map of HTTP headers that are common for all
+// authenticated service requests.
+func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) {
+	if client.reauthmut != nil {
+		client.reauthmut.RLock()
+		if client.reauthmut.reauthing {
+			client.reauthmut.RUnlock()
+			return
+		}
+		client.reauthmut.RUnlock()
+	}
+	t := client.Token()
+	if t == "" {
+		return
+	}
+	return map[string]string{"X-Auth-Token": t}
+}
+
+// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token.
+// If the application's ProviderClient is not used concurrently, this doesn't need to be called.
+func (client *ProviderClient) UseTokenLock() {
+	client.mut = new(sync.RWMutex)
+	client.reauthmut = new(reauthlock)
+}
+
+// Token safely reads the value of the auth token from the ProviderClient. Applications should
+// call this method to access the token instead of the TokenID field.
+func (client *ProviderClient) Token() string {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.TokenID
+}
+
+// SetToken safely sets the value of the auth token in the ProviderClient. Applications may
+// use this method in a custom ReauthFunc.
+func (client *ProviderClient) SetToken(t string) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	client.TokenID = t
+}
+
+// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is
+// called because of a 401 response, the caller may pass the previous token. In
+// this case, the reauthentication can be skipped if another thread has already
+// reauthenticated in the meantime. If no previous token is known, an empty
+// string should be passed instead to force unconditional reauthentication.
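+//
+// A typical call site, sketched: after a request made with token tok comes
+// back with a 401,
+//
+//	if err := client.Reauthenticate(tok); err != nil {
+//		return err
+//	}
+//	// retry the original request with the refreshed token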
+func (client *ProviderClient) Reauthenticate(previousToken string) (err error) {
+	if client.ReauthFunc == nil {
+		return nil
+	}
+
+	if client.mut == nil {
+		return client.ReauthFunc()
+	}
+	client.mut.Lock()
+	defer client.mut.Unlock()
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = true
+	client.reauthmut.Unlock()
+
+	if previousToken == "" || client.TokenID == previousToken {
+		err = client.ReauthFunc()
+	}
+
+	client.reauthmut.Lock()
+	client.reauthmut.reauthing = false
+	client.reauthmut.Unlock()
+	return
+}
+
+// RequestOpts customizes the behavior of the provider.Request() method.
+type RequestOpts struct {
+	// JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The
+	// content type of the request will default to "application/json" unless overridden by MoreHeaders.
+	// It's an error to specify both a JSONBody and a RawBody.
+	JSONBody interface{}
+	// RawBody contains an io.Reader that will be consumed by the request directly. No content-type
+	// will be set unless one is provided explicitly by MoreHeaders.
+	RawBody io.Reader
+	// JSONResponse, if provided, will be populated with the contents of the response body parsed as
+	// JSON.
+	JSONResponse interface{}
+	// OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If
+	// the response has a different code, an error will be returned.
+	OkCodes []int
+	// MoreHeaders specifies additional HTTP headers to be provided with the request. If a header is
+	// provided with a blank value (""), that header will be *omitted* instead: use this to suppress
+	// the default Accept header or an inferred Content-Type, for example.
+	MoreHeaders map[string]string
+	// ErrorContext specifies the resource error type to return if an error is encountered.
+	// This lets resources override default error messages based on the response status code.
+	ErrorContext error
+}
+
+var applicationJSON = "application/json"
+
+// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication
+// header will automatically be provided.
+func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
+	var body io.Reader
+	var contentType *string
+
+	// Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided
+	// io.Reader as-is. Default the content-type to application/json.
+	if options.JSONBody != nil {
+		if options.RawBody != nil {
+			panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().")
+		}
+
+		rendered, err := json.Marshal(options.JSONBody)
+		if err != nil {
+			return nil, err
+		}
+
+		body = bytes.NewReader(rendered)
+		contentType = &applicationJSON
+	}
+
+	if options.RawBody != nil {
+		body = options.RawBody
+	}
+
+	// Construct the http.Request.
+	req, err := http.NewRequest(method, url, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to
+	// modify or omit any header.
+ if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // get latest token from client + for k, v := range client.AuthenticatedHeaders() { + req.Header.Set(k, v) + } + + // Set connection parameter to close the connection immediately when we've got the response + req.Close = true + + prereqtok := req.Header.Get("X-Auth-Token") + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + // Allow default OkCodes if none explicitly set + if options.OkCodes == nil { + options.OkCodes = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range options.OkCodes { + if resp.StatusCode == code { + ok = true + break + } + } + + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + respErr := ErrUnexpectedResponseCode{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + + errType := options.ErrorContext + switch resp.StatusCode { + case http.StatusBadRequest: + err = ErrDefault400{respErr} + if error400er, ok := errType.(Err400er); ok { + err = error400er.Error400(respErr) + } + case http.StatusUnauthorized: + if client.ReauthFunc != nil { + err = client.Reauthenticate(prereqtok) + if err != nil { + e := &ErrUnableToReauthenticate{} + e.ErrOriginal = respErr + return nil, e + } + if options.RawBody != nil { + if seeker, ok := options.RawBody.(io.Seeker); ok { + seeker.Seek(0, 0) + } + } + resp, err = client.Request(method, url, options) + if err != nil { + switch err.(type) { + case *ErrUnexpectedResponseCode: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err.(*ErrUnexpectedResponseCode) + return nil, e + default: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err + return nil, e + } + } + return resp, nil + } + err = ErrDefault401{respErr} + if error401er, ok := errType.(Err401er); ok { + err = error401er.Error401(respErr) + } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } + case http.StatusNotFound: + err = ErrDefault404{respErr} + if error404er, ok := errType.(Err404er); ok { + err = error404er.Error404(respErr) + } + case http.StatusMethodNotAllowed: + err = ErrDefault405{respErr} + if error405er, ok := errType.(Err405er); ok { + err = error405er.Error405(respErr) + } + case http.StatusRequestTimeout: + err = ErrDefault408{respErr} + if error408er, ok := errType.(Err408er); ok { + err = error408er.Error408(respErr) + } + case 429: + err = ErrDefault429{respErr} + if error429er, ok := errType.(Err429er); ok { + err = error429er.Error429(respErr) + } + case http.StatusInternalServerError: + err = ErrDefault500{respErr} + if error500er, ok := errType.(Err500er); ok { + err = error500er.Error500(respErr) + } + case http.StatusServiceUnavailable: + err = ErrDefault503{respErr} + if error503er, ok := errType.(Err503er); ok { + err = error503er.Error503(respErr) + } + } + + if err == nil { + err = respErr + } + + return resp, err + } + + // Parse the response body as JSON, if requested to do so. 
+ if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "PATCH": + return []int{200, 204} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go new file mode 100644 index 0000000000..e64feee19e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -0,0 +1,382 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// ExtractInto allows users to provide an object into which `Extract` will extract +// the `Result.Body`. This would be useful for OpenStack providers that have +// different fields in the response object than OpenStack proper. 
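+//
+// A sketch of typical use (the target struct is illustrative only):
+//
+//	var out struct {
+//		Server struct {
+//			ID string `json:"id"`
+//		} `json:"server"`
+//	}
+//	err := r.ExtractInto(&out)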
+func (r Result) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + if reader, ok := r.Body.(io.Reader); ok { + if readCloser, ok := reader.(io.Closer); ok { + defer readCloser.Close() + } + return json.NewDecoder(reader).Decode(to) + } + + b, err := json.Marshal(r.Body) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +func (r Result) extractIntoPtr(to interface{}, label string) error { + if label == "" { + return r.ExtractInto(&to) + } + + var m map[string]interface{} + err := r.ExtractInto(&m) + if err != nil { + return err + } + + b, err := json.Marshal(m[label]) + if err != nil { + return err + } + + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + newType := reflect.New(typeOfV).Elem() + + for _, v := range m[label].([]interface{}) { + b, err := json.Marshal(v) + if err != nil { + return err + } + + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + newSlice = reflect.Append(newSlice, newType) + } + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + + err = json.Unmarshal(b, &to) + return err +} + +// ExtractIntoStructPtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying struct type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Struct: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to struct, got: %v", t) + } +} + +// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying slice type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Slice: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to slice, got: %v", t) + } +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. 
If you include its output in an issue related to
+// a buggy extraction function, we will all love you forever.
+func (r Result) PrettyPrintJSON() string {
+	pretty, err := json.MarshalIndent(r.Body, "", "  ")
+	if err != nil {
+		panic(err.Error())
+	}
+	return string(pretty)
+}
+
+// ErrResult is an internal type to be used by individual resource packages, but
+// its methods will be available on a wide variety of user-facing embedding
+// types.
+//
+// It represents results that only contain a potential error and
+// nothing else. Usually, if the operation executed successfully, the Err field
+// will be nil; otherwise it will be populated with a relevant error. Use the
+// ExtractErr method to cleanly pull it out.
+type ErrResult struct {
+	Result
+}
+
+// ExtractErr is a function that extracts error information, or nil, from a result.
+func (r ErrResult) ExtractErr() error {
+	return r.Err
+}
+
+/*
+HeaderResult is an internal type to be used by individual resource packages, but
+its methods will be available on a wide variety of user-facing embedding types.
+
+It represents a result that only contains an error (possibly nil) and an
+http.Header. This is used, for example, by the objectstorage packages in
+openstack, because most of the operations don't return response bodies, but do
+have relevant information in headers.
+*/
+type HeaderResult struct {
+	Result
+}
+
+// ExtractInto allows users to provide an object into which `Extract` will
+// extract the http.Header headers of the result.
+func (r HeaderResult) ExtractInto(to interface{}) error {
+	if r.Err != nil {
+		return r.Err
+	}
+
+	tmpHeaderMap := map[string]string{}
+	for k, v := range r.Header {
+		if len(v) > 0 {
+			tmpHeaderMap[k] = v[0]
+		}
+	}
+
+	b, err := json.Marshal(tmpHeaderMap)
+	if err != nil {
+		return err
+	}
+	err = json.Unmarshal(b, to)
+
+	return err
+}
+
+// RFC3339Milli describes a common time format used by some API responses.
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z"
+
+type JSONRFC3339Milli time.Time
+
+func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error {
+	b := bytes.NewBuffer(data)
+	dec := json.NewDecoder(b)
+	var s string
+	if err := dec.Decode(&s); err != nil {
+		return err
+	}
+	t, err := time.Parse(RFC3339Milli, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339Milli(t)
+	return nil
+}
+
+const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999"
+
+type JSONRFC3339MilliNoZ time.Time
+
+func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(RFC3339MilliNoZ, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339MilliNoZ(t)
+	return nil
+}
+
+type JSONRFC1123 time.Time
+
+func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC1123(t)
+	return nil
+}
+
+type JSONUnix time.Time
+
+func (jt *JSONUnix) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	unix, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return err
+	}
+	// Declare a local variable rather than assigning to the shared
+	// package-level `t`, which would race under concurrent unmarshalling.
+	t := time.Unix(unix, 0)
+	*jt = JSONUnix(t)
+	return nil
+}
+
+// RFC3339NoZ is the time format used in Heat (Orchestration).
+const RFC3339NoZ = "2006-01-02T15:04:05"
+
+type JSONRFC3339NoZ time.Time
+
+func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(RFC3339NoZ, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339NoZ(t)
+	return nil
+}
+
+/*
+Link is an internal type to be used in packages of collection resources that are
+paginated in a certain way.
+
+It's a response substructure common to many paginated collection results that is
+used to point to related pages. Usually, the one we care about is the one with
+Rel field set to "next".
+*/
+type Link struct {
+	Href string `json:"href"`
+	Rel  string `json:"rel"`
+}
+
+/*
+ExtractNextURL is an internal function useful for packages of collection
+resources that are paginated in a certain way.
+
+It attempts to extract the "next" URL from a slice of Link structs, returning
+"" if no such URL is present.
+*/
+func ExtractNextURL(links []Link) (string, error) {
+	var url string
+
+	for _, l := range links {
+		if l.Rel == "next" {
+			url = l.Href
+		}
+	}
+
+	if url == "" {
+		return "", nil
+	}
+
+	return url, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go
new file mode 100644
index 0000000000..d1a48fea35
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/service_client.go
@@ -0,0 +1,124 @@
+package gophercloud
+
+import (
+	"io"
+	"net/http"
+	"strings"
+)
+
+// ServiceClient stores details required to interact with a specific service API implemented by a provider.
+// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient.
+type ServiceClient struct {
+	// ProviderClient is a reference to the provider that implements this service.
+	*ProviderClient
+
+	// Endpoint is the base URL of the service's API, acquired from a service catalog.
+	// It MUST end with a /.
+	Endpoint string
+
+	// ResourceBase is the base URL shared by the resources within a service's API. It should include
+	// the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used
+	// as-is, instead.
+	ResourceBase string
+
+	// This is the service client type (e.g. compute, sharev2).
+	// NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS.
+	// It is only exported because it gets set in a different package.
+	Type string
+
+	// The microversion of the service to use. Set this to use a particular microversion.
+	Microversion string
+}
+
+// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /.
+func (client *ServiceClient) ResourceBaseURL() string {
+	if client.ResourceBase != "" {
+		return client.ResourceBase
+	}
+	return client.Endpoint
+}
+
+// ServiceURL constructs a URL for a resource belonging to this provider.
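+//
+// For example, with a ResourceBase of "https://compute.example.test/v2/"
+// (illustrative only):
+//
+//	client.ServiceURL("servers", "123")
+//	// => "https://compute.example.test/v2/servers/123"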
+func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} + +func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + if opts.MoreHeaders == nil { + opts.MoreHeaders = make(map[string]string) + } + + if client.Microversion != "" { + client.setMicroversionHeader(opts) + } +} + +// Get calls `Request` with the "GET" HTTP verb. +func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, JSONResponse, opts) + return client.Request("GET", url, opts) +} + +// Post calls `Request` with the "POST" HTTP verb. +func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("POST", url, opts) +} + +// Put calls `Request` with the "PUT" HTTP verb. +func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PUT", url, opts) +} + +// Patch calls `Request` with the "PATCH" HTTP verb. +func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PATCH", url, opts) +} + +// Delete calls `Request` with the "DELETE" HTTP verb. +func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("DELETE", url, opts) +} + +func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + switch client.Type { + case "compute": + opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion + case "sharev2": + opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + } + + if client.Type != "" { + opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go new file mode 100644 index 0000000000..68f9a5d3ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/util.go @@ -0,0 +1,102 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" + "time" +) + +// WaitFor polls a predicate function, once per second, up to a timeout limit. +// This is useful to wait for a resource to transition to a certain state. +// To handle situations when the predicate might hang indefinitely, the +// predicate will be prematurely cancelled after the timeout. +// Resource packages will wrap this in a more convenient function that's +// specific to a certain resource, but it can also be useful on its own. 
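+//
+// A sketch of typical use (isReady is a hypothetical helper that reports
+// whether the resource has reached the desired state):
+//
+//	err := gophercloud.WaitFor(60, func() (bool, error) {
+//		return isReady(client, id)
+//	})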
+func WaitFor(timeout int, predicate func() (bool, error)) error { + type WaitForResult struct { + Success bool + Error error + } + + start := time.Now().Unix() + + for { + // If a timeout is set, and that's been exceeded, shut it down. + if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { + return fmt.Errorf("A timeout occurred") + } + + time.Sleep(1 * time.Second) + + var result WaitForResult + ch := make(chan bool, 1) + go func() { + defer close(ch) + satisfied, err := predicate() + result.Success = satisfied + result.Error = err + }() + + select { + case <-ch: + if result.Error != nil { + return result.Error + } + if result.Success { + return nil + } + // If the predicate has not finished by the timeout, cancel it. + case <-time.After(time.Duration(timeout) * time.Second): + return fmt.Errorf("A timeout occurred") + } + } +} + +// NormalizeURL is an internal function to be used by provider clients. +// +// It ensures that each endpoint URL has a closing `/`, as expected by +// ServiceClient's methods. +func NormalizeURL(url string) string { + if !strings.HasSuffix(url, "/") { + return url + "/" + } + return url +} + +// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as +// a reference in the filesystem, if necessary. basePath is assumed to contain +// either '.' when first used, or the file:// type fqdn of the parent resource. +// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml index b5ffbe03d8..2bca4c599f 100644 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -4,7 +4,6 @@ go: - 1.6.x - 1.7.x - 1.8.x - - 1.9.x - master matrix: allow_failures: diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index b26e167f0f..8239edc2cb 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -41,11 +41,7 @@ type Cache interface { // cacheKey returns the cache key for req. 
func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } + return req.URL.String() } // CachedResponse returns the cached http.Response for req if present, and nil diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index cb416b394f..68d097a1c0 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -47,7 +47,7 @@ func (c *LRU) Purge() { c.evictList.Init() } -// Add adds a value to the cache. Returns true if an eviction occurred. +// Add adds a value to the cache. Returns true if an eviction occured. func (c *LRU) Add(key, value interface{}) bool { // Check for existing item if ent, ok := c.items[key]; ok { diff --git a/vendor/github.com/howeyc/gopass/.travis.yml b/vendor/github.com/howeyc/gopass/.travis.yml deleted file mode 100644 index cc5d509fdf..0000000000 --- a/vendor/github.com/howeyc/gopass/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -os: - - linux - - osx - -go: - - 1.3 - - 1.4 - - 1.5 - - tip diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt deleted file mode 100644 index 14f74708a4..0000000000 --- a/vendor/github.com/howeyc/gopass/LICENSE.txt +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012 Chris Howey - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE deleted file mode 100644 index da23621dc8..0000000000 --- a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE +++ /dev/null @@ -1,384 +0,0 @@ -Unless otherwise noted, all files in this distribution are released -under the Common Development and Distribution License (CDDL). -Exceptions are noted within the associated source files. - --------------------------------------------------------------------- - - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates - or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), - and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing - Original Software with files containing Modifications, in - each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other - than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. 
"Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this - License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed - herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original - Software or previous Modifications; - - B. Any new file that contains any part of the Original - Software or previous Modifications; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable - form of computer software code that is originally released - under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, - process, and apparatus claims, in any patent Licensable by - grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms - of, this License. For legal entities, "You" includes any - entity which controls, is controlled by, or is under common - control with You. For purposes of this definition, - "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by - contract or otherwise, or (b) ownership of more than fifty - percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the Initial - Developer hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, - reproduce, modify, display, perform, sublicense and - distribute the Original Software (or portions thereof), - with or without Modifications, and/or as part of a Larger - Work; and - - (b) under Patent Claims infringed by the making, using or - selling of Original Software, to make, have made, use, - practice, sell, and offer for sale, and/or otherwise - dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are - effective on the date Initial Developer first distributes - or otherwise makes the Original Software available to a - third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original - Software, or (2) for infringements caused by: (i) the - modification of the Original Software, or (ii) the - combination of the Original Software with other software - or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, - modify, display, perform, sublicense and distribute the - Modifications created by such Contributor (or portions - thereof), either on an unmodified basis, with other - Modifications, as Covered Software and/or as part of a - Larger Work; and - - (b) under Patent Claims infringed by the making, using, or - selling of Modifications made by that Contributor either - alone and/or in combination with its Contributor Version - (or portions of such combination), to make, use, sell, - offer for sale, have made, and/or otherwise dispose of: - (1) Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions - of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are - effective on the date Contributor first distributes or - otherwise makes the Modifications available to a third - party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted - from the Contributor Version; (2) for infringements caused - by: (i) third party modifications of Contributor Version, - or (ii) the combination of Modifications made by that - Contributor with other software (except as part of the - Contributor Version) or other devices; or (3) under Patent - Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in Source - Code form and that Source Code form must be distributed only under - the terms of this License. You must include a copy of this - License with every copy of the Source Code form of the Covered - Software You distribute or otherwise make available. You must - inform recipients of any such Covered Software in Executable form - as to how they can obtain such Covered Software in Source Code - form in a reasonable manner on or through a medium customarily - used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or - You have sufficient rights to grant the rights conveyed by this - License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may - not remove or alter any copyright, patent or trademark notices - contained within the Covered Software, or any notices of licensing - or any descriptive text giving attribution to any Contributor or - the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version - of this License or the recipients' rights hereunder. You may - choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of - Covered Software. 
However, you may do so only on Your own behalf, - and not on behalf of the Initial Developer or any Contributor. - You must make it absolutely clear that any such warranty, support, - indemnity or liability obligation is offered by You alone, and You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial Developer or - such Contributor as a result of warranty, support, indemnity or - liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software - under the terms of this License or under the terms of a license of - Your choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the - Covered Software in Executable form under a different license, You - must make it absolutely clear that any terms which differ from - this License are offered by You alone, not by the Initial - Developer or Contributor. You hereby agree to indemnify the - Initial Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of any - such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and - distribute the Larger Work as a single product. In such a case, - You must make sure the requirements of this License are fulfilled - for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and may - publish revised and/or new versions of this License from time to - time. Each version will be given a distinguishing version number. - Except as provided in Section 4.3, no one other than the license - steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. - If the Initial Developer includes a notice in the Original - Software prohibiting it from being distributed or otherwise made - available under any subsequent version of the License, You must - distribute and make the Covered Software available under the terms - of the version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to use, - distribute or otherwise make the Covered Software available under - the terms of any subsequent version of the License published by - the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license - and remove any references to the name of the license steward - (except to note that the license differs from this License); and - (b) otherwise make it clear that the license contains terms which - differ from this License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY - NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond - the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that - the Participant Software (meaning the Contributor Version where - the Participant is a Contributor or the Original Software where - the Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if - the Initial Developer is not the Participant) and all Contributors - under Sections 2.1 and/or 2.2 of this License shall, upon 60 days - notice from Participant terminate prospectively and automatically - at the expiration of such 60 day notice period, unless if within - such 60 day period You withdraw Your claim with respect to the - Participant Software against such Participant either unilaterally - or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial - computer software" (as that term is defined at 48 - C.F.R. 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 - (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 - C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all - U.S. Government End Users acquire Covered Software with only those - rights set forth herein. This U.S. Government Rights clause is in - lieu of, and supersedes, any other FAR, DFAR, or other clause or - provision that addresses Government rights in computer software - under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed - by the law of the jurisdiction specified in a notice contained - within the Original Software (except to the extent applicable law, - if any, provides otherwise), excluding such jurisdiction's - conflict-of-law provisions. Any litigation relating to this - License shall be subject to the jurisdiction of the courts located - in the jurisdiction and venue specified in a notice contained - within the Original Software, with the losing party responsible - for costs, including, without limitation, court costs and - reasonable attorneys' fees and expenses. The application of the - United Nations Convention on Contracts for the International Sale - of Goods is expressly excluded. Any law or regulation which - provides that the language of a contract shall be construed - against the drafter shall not apply to this License. You agree - that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - --------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND -DISTRIBUTION LICENSE (CDDL) - -For Covered Software in this distribution, this License shall -be governed by the laws of the State of California (excluding -conflict-of-law provisions). - -Any litigation relating to this License shall be subject to the -jurisdiction of the Federal Courts of the Northern District of -California and the state courts of the State of California, with -venue lying in Santa Clara County, California. diff --git a/vendor/github.com/howeyc/gopass/README.md b/vendor/github.com/howeyc/gopass/README.md deleted file mode 100644 index 2d6a4e72c9..0000000000 --- a/vendor/github.com/howeyc/gopass/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# getpasswd in Go [![GoDoc](https://godoc.org/github.com/howeyc/gopass?status.svg)](https://godoc.org/github.com/howeyc/gopass) [![Build Status](https://secure.travis-ci.org/howeyc/gopass.png?branch=master)](http://travis-ci.org/howeyc/gopass) - -Retrieve password from user terminal or piped input without echo. 
- -Verified on BSD, Linux, and Windows. - -Example: -```go -package main - -import "fmt" -import "github.com/howeyc/gopass" - -func main() { - fmt.Printf("Password: ") - - // Silent. For printing *'s use gopass.GetPasswdMasked() - pass, err := gopass.GetPasswd() - if err != nil { - // Handle gopass.ErrInterrupted or getch() read error - } - - // Do something with pass -} -``` - -Caution: Multi-byte characters not supported! diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go deleted file mode 100644 index 5900e63ea8..0000000000 --- a/vendor/github.com/howeyc/gopass/pass.go +++ /dev/null @@ -1,93 +0,0 @@ -package gopass - -import ( - "errors" - "fmt" - "io" - "os" -) - -var defaultGetCh = func() (byte, error) { - buf := make([]byte, 1) - if n, err := os.Stdin.Read(buf); n == 0 || err != nil { - if err != nil { - return 0, err - } - return 0, io.EOF - } - return buf[0], nil -} - -var ( - maxLength = 512 - ErrInterrupted = errors.New("interrupted") - ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength) - - // Provide variable so that tests can provide a mock implementation. - getch = defaultGetCh -) - -// getPasswd returns the input read from terminal. -// If masked is true, typing will be matched by asterisks on the screen. -// Otherwise, typing will echo nothing. -func getPasswd(masked bool) ([]byte, error) { - var err error - var pass, bs, mask []byte - if masked { - bs = []byte("\b \b") - mask = []byte("*") - } - - if isTerminal(os.Stdin.Fd()) { - if oldState, err := makeRaw(os.Stdin.Fd()); err != nil { - return pass, err - } else { - defer func() { - restore(os.Stdin.Fd(), oldState) - fmt.Println() - }() - } - } - - // Track total bytes read, not just bytes in the password. This ensures any - // errors that might flood the console with nil or -1 bytes infinitely are - // capped. - var counter int - for counter = 0; counter <= maxLength; counter++ { - if v, e := getch(); e != nil { - err = e - break - } else if v == 127 || v == 8 { - if l := len(pass); l > 0 { - pass = pass[:l-1] - fmt.Print(string(bs)) - } - } else if v == 13 || v == 10 { - break - } else if v == 3 { - err = ErrInterrupted - break - } else if v != 0 { - pass = append(pass, v) - fmt.Print(string(mask)) - } - } - - if counter > maxLength { - err = ErrMaxLengthExceeded - } - - return pass, err -} - -// GetPasswd returns the password read from the terminal without echoing input. -// The returned byte array does not include end-of-line characters. -func GetPasswd() ([]byte, error) { - return getPasswd(false) -} - -// GetPasswdMasked returns the password read from the terminal, echoing asterisks. -// The returned byte array does not include end-of-line characters. 
-func GetPasswdMasked() ([]byte, error) { - return getPasswd(true) -} diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go deleted file mode 100644 index 0835641462..0000000000 --- a/vendor/github.com/howeyc/gopass/terminal.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !solaris - -package gopass - -import "golang.org/x/crypto/ssh/terminal" - -type terminalState struct { - state *terminal.State -} - -func isTerminal(fd uintptr) bool { - return terminal.IsTerminal(int(fd)) -} - -func makeRaw(fd uintptr) (*terminalState, error) { - state, err := terminal.MakeRaw(int(fd)) - - return &terminalState{ - state: state, - }, err -} - -func restore(fd uintptr, oldState *terminalState) error { - return terminal.Restore(int(fd), oldState.state) -} diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go deleted file mode 100644 index 257e1b4e81..0000000000 --- a/vendor/github.com/howeyc/gopass/terminal_solaris.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License, Version 1.0 only - * (the "License"). You may not use this file except in compliance - * with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or http://www.opensolaris.org/os/licensing. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -// Below is derived from Solaris source, so CDDL license is included. - -package gopass - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -type terminalState struct { - state *unix.Termios -} - -// isTerminal returns true if there is a terminal attached to the given -// file descriptor. -// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func isTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} - -// makeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c -func makeRaw(fd uintptr) (*terminalState, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - if err != nil { - return nil, err - } - oldTermios := *oldTermiosPtr - - newTermios := oldTermios - newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL - if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil { - return nil, err - } - - return &terminalState{ - state: oldTermiosPtr, - }, nil -} - -func restore(fd uintptr, oldState *terminalState) error { - return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state) -} diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore index 501fcdc9a6..15556530a8 100644 --- a/vendor/github.com/json-iterator/go/.gitignore +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -1,3 +1,4 @@ -.idea +/vendor +/bug_test.go /coverage.txt -/profile.out +/.idea diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 0000000000..3719afe8e0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/json-iterator/go" + packages = ["."] + revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" + version = "1.1.3" + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f" + version = "1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 0000000000..5801ffa1e9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.0" diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 3a0d680983..54d5afe957 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,6 +8,8 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + ``` Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com ``` @@ -29,6 +31,9 @@ Raw Result (easyjson requires static code generation) | easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | | jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +Always benchmark with your own workload. +The result depends heavily on the data input. + # Usage 100% compatibility with standard lib diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 0000000000..f371bfed76 --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,148 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. 
+// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + if iter.head != iter.tail { + return true + } + return iter.loadMore() +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse() +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse() +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse() +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 0000000000..daecfed615 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,321 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
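+// Editorial note: a minimal usage sketch, assuming the package-level Get
+// helper declared in adapter.go above (the input document is hypothetical):
+//
+//	data := []byte(`{"user": {"age": 42}}`)
+//	age := jsoniter.Get(data, "user", "age").ToInt()          // 42, parsed lazily
+//	err := jsoniter.Get(data, "user", "missing").LastError()  // non-nil: no such key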
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
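+// Editorial note: unlike json.RawMessage, the returned Any can be queried in
+// place without a second unmarshal pass. A minimal sketch (assuming the
+// exported ParseString iterator constructor from this package):
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, {"k": "v"}]`)
+//	any := iter.ReadAny()
+//	v := any.Get(1, "k").ToString() // "v"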
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
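+		// Editorial note: the wildcard is matched as int32 because an untyped
+		// '*' rune literal passed through ...interface{} takes its default
+		// type, rune (an alias for int32); any other rune is rejected below
+		// as an invalid path element.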
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/any_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_array.go rename to vendor/github.com/json-iterator/go/any_array.go diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_bool.go rename to vendor/github.com/json-iterator/go/any_bool.go diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/any_float.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_float.go rename to vendor/github.com/json-iterator/go/any_float.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int32.go rename to vendor/github.com/json-iterator/go/any_int32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int64.go rename to vendor/github.com/json-iterator/go/any_int64.go diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_invalid.go rename to vendor/github.com/json-iterator/go/any_invalid.go diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_nil.go rename to vendor/github.com/json-iterator/go/any_nil.go diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 
0000000000..9d1e901a66 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/any_object.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_object.go rename to vendor/github.com/json-iterator/go/any_object.go diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 0000000000..a4b93c78c8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } 
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint32.go rename to vendor/github.com/json-iterator/go/any_uint32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint64.go rename to vendor/github.com/json-iterator/go/any_uint64.go diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100755 index 0000000000..b45ef68831 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 0000000000..835819129c --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. 
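+// Editorial note: a minimal sketch of building and using a frozen API value
+// (the configuration chosen here is illustrative, not prescriptive):
+//
+//	var json = jsoniter.Config{SortMapKeys: true, EscapeHTML: true}.Froze()
+//	out, err := json.Marshal(map[string]int{"b": 2, "a": 1})
+//	// with SortMapKeys set, out is expected to be `{"a":1,"b":2}`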
+type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + extensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) 
+ } + if len(encoderExtension) > 0 { + api.extensions = append(api.extensions, encoderExtension) + } + if len(decoderExtension) > 0 { + api.extensions = append(api.extensions, decoderExtension) + } + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse() *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return false + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. 
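+// Editorial note: this is the encoder substitution behind ConfigFastest
+// (declared above). A minimal sketch of the precision trade-off (the outputs
+// shown are approximate expectations, not verified results):
+//
+//	s, _ := jsoniter.ConfigFastest.MarshalToString(0.123456789)
+//	// s should be "0.123457" (6 digits), vs "0.123456789" from ConfigDefault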
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go 
b/vendor/github.com/json-iterator/go/feature_adapter.go deleted file mode 100644 index 40a701ab6b..0000000000 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ /dev/null @@ -1,132 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -func lastNotSpacePos(data []byte) int { - for i := len(data) - 1; i >= 0; i-- { - if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { - return i + 1 - } - } - return 0 -} - -// UnmarshalFromString convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? 
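The adapter API deleted in this file is what lets jsoniter stand in for encoding/json with only an import swap. A sketch of the usual drop-in pattern, assuming `ConfigCompatibleWithStandardLibrary` as defined elsewhere in this vendored code:

package example

import jsoniter "github.com/json-iterator/go"

// Aliasing the frozen config lets existing call sites keep reading json.Marshal.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func roundTrip(p point) (point, error) {
	data, err := json.Marshal(p) // same call shape as encoding/json
	if err != nil {
		return point{}, err
	}
	var out point
	err = json.Unmarshal(data, &out)
	return out, err
}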
-func (adapter *Decoder) More() bool { - return adapter.iter.head != adapter.iter.tail -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) -func (adapter *Decoder) UseNumber() { - origCfg := adapter.iter.cfg.configBeforeFrozen - origCfg.UseNumber = true - adapter.iter.cfg = origCfg.Froze().(*frozenConfig) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - adapter.stream.cfg.indentionStep = len(indent) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.Froze().(*frozenConfig) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go deleted file mode 100644 index 6733dce4cc..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ /dev/null @@ -1,242 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. 
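The `Any` interface declared next is what `Get` returns; the lazy implementations keep raw bytes and parse only the parts a caller actually touches. A small usage sketch (the document shape is hypothetical):

package example

import jsoniter "github.com/json-iterator/go"

// userName extracts one leaf without decoding the rest of the document,
// e.g. doc = []byte(`{"users":[{"name":"ada"}]}`) yields "ada".
func userName(doc []byte) (string, error) {
	name := jsoniter.Get(doc, "users", 0, "name")
	return name.ToString(), name.LastError()
}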
-type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - // TODO: add Set - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - return WrapUint64(uint64(val.(uint))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
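`Wrap` and the typed `WrapXxx` helpers above build `Any` values from Go values instead of JSON bytes, so the same conversion methods work in both directions. A sketch, assuming the numeric wrappers render via strconv (only `stringAny` is shown in this file):

package example

import jsoniter "github.com/json-iterator/go"

// wrapped converts in both directions: int64 -> "42" and "42" -> int64(42).
func wrapped() (string, int64) {
	n := jsoniter.WrapInt64(42)
	s := jsoniter.WrapString("42")
	return n.ToString(), s.ToInt64()
}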
-func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go deleted file mode 100644 index 4e1c27641d..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any_number.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import "unsafe" - -type numberLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go deleted file mode 100644 index abf060bd59..0000000000 --- a/vendor/github.com/json-iterator/go/feature_any_string.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any 
*stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - endPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - endPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go deleted file mode 100644 index 687210820b..0000000000 --- a/vendor/github.com/json-iterator/go/feature_config.go +++ /dev/null @@ -1,343 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "errors" - "io" - "reflect" - "sync/atomic" - "unsafe" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - TagKey string - ValidateJsonRawMessage bool -} - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - decoderCache unsafe.Pointer - encoderCache unsafe.Pointer - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. 
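A `Config` is a plain value, and `Froze` (defined just below) turns it into an immutable `API` with its own encoder/decoder caches and pools, so the frozen result is what should be built once and reused. A configuration sketch using only fields from the struct above:

package example

import jsoniter "github.com/json-iterator/go"

// api is frozen once at package init and shared by all encode/decode calls.
var api = jsoniter.Config{
	IndentionStep:          2,      // indent nested output by two spaces
	SortMapKeys:            true,   // deterministic key order for map output
	ValidateJsonRawMessage: true,   // invalid RawMessage is re-encoded as null
	TagKey:                 "json", // the default, shown here for completeness
}.Froze()

func encode(v interface{}) (string, error) { return api.MarshalToString(v) }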
-type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder - Valid(data []byte) bool -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, -}.Froze() - -// Froze forge API from config -func (cfg Config) Froze() API { - // TODO: cache frozen config - frozenConfig := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - streamPool: make(chan *Stream, 16), - iteratorPool: make(chan *Iterator, 16), - } - atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) - atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) - if cfg.MarshalFloatWith6Digits { - frozenConfig.marshalFloatWith6Digits() - } - if cfg.EscapeHTML { - frozenConfig.escapeHTML() - } - if cfg.UseNumber { - frozenConfig.useNumber() - } - if cfg.ValidateJsonRawMessage { - frozenConfig.validateJsonRawMessage() - } - frozenConfig.configBeforeFrozen = cfg - return frozenConfig -} - -func (cfg *frozenConfig) validateJsonRawMessage() { - encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { - rawMessage := *(*json.RawMessage)(ptr) - iter := cfg.BorrowIterator([]byte(rawMessage)) - iter.Read() - if iter.Error != nil { - stream.WriteRaw("null") - } else { - cfg.ReturnIterator(iter) - stream.WriteRaw(string(rawMessage)) - } - }, func(ptr unsafe.Pointer) bool { - return false - }} - cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) - cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) -} - -func (cfg *frozenConfig) useNumber() { - cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }}) -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) registerExtension(extension Extension) { - cfg.extensions = append(cfg.extensions, extension) -} - -type lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder 
*lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits() { - // for better performance - cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) - cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML() { - cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - copied := map[reflect.Type]ValDecoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = decoder - done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - copied := map[reflect.Type]ValEncoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = encoder - done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - return cache[cacheKey] -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - return cache[cacheKey] -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - 
panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.Froze().Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - data = data[:lastNotSpacePos(data)] - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") - } - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - data = data[:lastNotSpacePos(data)] - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - typ := reflect.TypeOf(v) - if typ.Kind() != reflect.Ptr { - // return non-pointer error - return errors.New("the second param must be ptr type") - } - iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - } - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} - -func (cfg *frozenConfig) Valid(data []byte) bool { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.Skip() - return iter.Error == nil -} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go deleted file mode 100644 index 54a3a0fd9c..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter.go +++ /dev/null @@ -1,321 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - 
valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. -type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - captureStartedAt int - captured []byte - Error error -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely") - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. 
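The `Iterator` removed in this file is a pull parser: `WhatIsNext` peeks at the next value's type without consuming it, and the callback-based readers drive it element by element. A sketch that sums a JSON array as it streams, without materializing a slice:

package example

import jsoniter "github.com/json-iterator/go"

// sumArray streams over input such as []byte(`[1, 2.5, 3]`) and returns 6.5.
func sumArray(data []byte) (float64, error) {
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, data)
	total := 0.0
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		total += it.ReadFloat64()
		return true // keep consuming elements
	})
	return total, iter.Error
}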
-func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - peekEnd := iter.head + 10 - if peekEnd > iter.tail { - peekEnd = iter.tail - } - parsing := string(iter.buf[peekStart:peekEnd]) - contextStart := iter.head - 50 - if contextStart < 0 { - contextStart = 0 - } - contextEnd := iter.head + 50 - if contextEnd > iter.tail { - contextEnd = iter.tail - } - context := string(iter.buf[contextStart:contextEnd]) - iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", - operation, msg, iter.head-peekStart, parsing, context) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. 
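`Read`, defined next, maps JSON onto generic Go types: objects become `map[string]interface{}`, arrays become `[]interface{}`, and numbers decode as `float64` unless the config set `UseNumber`, in which case they stay `json.Number` strings. A sketch of that difference:

package example

import jsoniter "github.com/json-iterator/go"

// readGeneric decodes the same literal under two configs.
func readGeneric() (interface{}, interface{}) {
	withNumber := jsoniter.Config{UseNumber: true}.Froze()
	asFloat := jsoniter.ParseString(jsoniter.ConfigDefault, `1.5`).Read() // float64(1.5)
	asNumber := jsoniter.ParseString(withNumber, `1.5`).Read()            // json.Number("1.5")
	return asFloat, asNumber
}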
-func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go deleted file mode 100644 index 86f4599122..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_float.go +++ /dev/null @@ -1,341 +0,0 @@ -package jsoniter - -import ( - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.ReportError("readFloat32", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - 
return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return 
iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go deleted file mode 100644 index 886879efdb..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_int.go +++ /dev/null @@ -1,258 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} - -// ReadUint8 read uint8 -func (iter *Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter *Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: 
"+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint32(ind) - } - if !iter.loadMore() { - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c 
== '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + (value << 1) + uint64(ind) - if value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - return value - } - } -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go deleted file mode 100644 index 6ec8fb7fd7..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_object.go +++ /dev/null @@ -1,212 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode" - "unsafe" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. -func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - return string(iter.readObjectFieldAsBytes()) - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) - return - case ',': - return string(iter.readObjectFieldAsBytes()) - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -func (iter *Iterator) readFieldHash() int32 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c == '"' { - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if 'A' <= b && b <= 'Z' { - b += 'a' - 'A' - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - } - return int32(hash) - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } - } - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 -} - -func calcHash(str string) int32 { - hash := int64(0x811c9dc5) - for _, b := range str { - hash ^= int64(unicode.ToLower(b)) - hash *= 0x1000193 - } - return int32(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.readObjectFieldAsBytes() 
- if !callback(iter, *(*string)(unsafe.Pointer(&field))) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.readObjectFieldAsBytes() - if !callback(iter, *(*string)(unsafe.Pointer(&field))) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectStart() bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go deleted file mode 100644 index 047d58a4bc..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build jsoniter-sloppy - -package jsoniter - -// sloppy but faster implementation, do not validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter 
*Iterator) skipArray() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - case ']': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - case '}': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - } - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. -func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go deleted file mode 100644 index cc1933de2e..0000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go +++ /dev/null @@ -1,89 +0,0 @@ -//+build !jsoniter-sloppy - -package jsoniter - -import "fmt" - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - iter.ReadFloat32() - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // 
already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("trySkipString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return true // already failed - } - } - return false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go deleted file mode 100644 index e187b200a9..0000000000 --- a/vendor/github.com/json-iterator/go/feature_json_number.go +++ /dev/null @@ -1,31 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "strconv" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go deleted file mode 100644 index 73962bc6f6..0000000000 --- a/vendor/github.com/json-iterator/go/feature_pool.go +++ /dev/null @@ -1,57 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - select { - case stream := <-cfg.streamPool: - stream.Reset(writer) - return stream - default: - return NewStream(cfg, writer, 512) - } -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.Error = nil - select { - case cfg.streamPool <- stream: - return - default: - return - } -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - select { - case iter := <-cfg.iteratorPool: - iter.ResetBytes(data) - return iter - default: - return ParseBytes(cfg, data) - } -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - select { - case cfg.iteratorPool <- iter: - return - default: - return - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go 
b/vendor/github.com/json-iterator/go/feature_reflect.go deleted file mode 100644 index 4483e34b84..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect.go +++ /dev/null @@ -1,703 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "time" - "unsafe" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). -type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) - EncodeInterface(val interface{}, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -// WriteToStream the default implementation for TypeEncoder method EncodeInterface -func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteNil() - return - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -var jsonNumberType reflect.Type -var jsoniterNumberType reflect.Type -var jsonRawMessageType reflect.Type -var jsoniterRawMessageType reflect.Type -var anyType reflect.Type -var marshalerType reflect.Type -var unmarshalerType reflect.Type -var textMarshalerType reflect.Type -var textUnmarshalerType reflect.Type - -func init() { - jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() - jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() - jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() - jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() - anyType = reflect.TypeOf((*Any)(nil)).Elem() - marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -} - -type optionalDecoder struct { - valueType reflect.Type - valueDecoder ValDecoder -} - -func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type deferenceDecoder struct { 
- // only to deference a pointer - valueType reflect.Type - valueDecoder ValDecoder -} - -func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type optionalEncoder struct { - valueEncoder ValEncoder -} - -func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if *((*unsafe.Pointer)(ptr)) == nil { - return true - } - return false -} - -type placeholderEncoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.getRealEncoder().Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.getRealEncoder().IsEmpty(ptr) -} - -func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { - for i := 0; i < 500; i++ { - realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderEncoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - return realDecoder - } - } - panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) -} - -type placeholderDecoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - for i := 0; i < 500; i++ { - realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderDecoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - realDecoder.Decode(ptr, iter) - return - } - } - panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) -} - -// emptyInterface is the header for an interface{} value. 
-type emptyInterface struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -// emptyInterface is the header for an interface with method (not interface{}) -type nonEmptyInterface struct { - // see ../runtime/iface.go:/Itab - itab *struct { - ityp unsafe.Pointer // static interface type - typ unsafe.Pointer // dynamic concrete type - link unsafe.Pointer - bad int32 - unused int32 - fun [100000]unsafe.Pointer // method table - } - word unsafe.Pointer -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - typ := reflect.TypeOf(obj) - cacheKey := typ.Elem() - decoder, err := decoderOfType(iter.cfg, cacheKey) - if err != nil { - iter.Error = err - return - } - e := (*emptyInterface)(unsafe.Pointer(&obj)) - decoder.Decode(e.word, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - typ := reflect.TypeOf(val) - cacheKey := typ - encoder, err := encoderOfType(stream.cfg, cacheKey) - if err != nil { - stream.Error = err - return - } - encoder.EncodeInterface(val, stream) -} - -type prefix string - -func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return decoder, err -} - -func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return encoder, err -} - -func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - cacheKey := typ - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder, nil - } - decoder = getTypeDecoderFromExtension(typ) - if decoder != nil { - cfg.addDecoderToCache(cacheKey, decoder) - return decoder, nil - } - decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} - cfg.addDecoderToCache(cacheKey, decoder) - decoder, err := createDecoderOfType(cfg, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - cfg.addDecoderToCache(cacheKey, decoder) - return decoder, err -} - -func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - typeName := typ.String() - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(textUnmarshalerType) { - 
templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - if err != nil { - return nil, err - } - return &base64Codec{sliceDecoder: sliceDecoder}, nil - } - if typ.Implements(anyType) { - return &anyCodec{}, nil - } - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - cacheKey := typ - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != 
nil { - return encoder, nil - } - encoder = getTypeEncoderFromExtension(typ) - if encoder != nil { - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, nil - } - encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} - cfg.addEncoderToCache(cacheKey, encoder) - encoder, err := createEncoderOfType(cfg, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, err -} - -func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} - } - return encoder, nil - } - if reflect.PtrTo(typ).Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(reflect.PtrTo(typ)) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - return encoder, nil - } - if typ.Implements(textMarshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &textMarshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} - } - return encoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - return &base64Codec{}, nil - } - if typ.Implements(anyType) { - return &anyCodec{}, nil - } - return createEncoderOfSimpleType(cfg, typ) -} - -func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { - kind := typ.Kind() - switch kind { - case reflect.String: - return &stringCodec{}, nil - case reflect.Int: - return &intCodec{}, nil - case reflect.Int8: - return &int8Codec{}, nil - case reflect.Int16: - return &int16Codec{}, nil - case reflect.Int32: - return &int32Codec{}, nil - case reflect.Int64: - return &int64Codec{}, nil - case reflect.Uint: - return &uintCodec{}, nil - case reflect.Uint8: - return &uint8Codec{}, nil - case reflect.Uint16: - return &uint16Codec{}, nil - case reflect.Uint32: - return &uint32Codec{}, nil - case reflect.Uintptr: - return &uintptrCodec{}, nil - case reflect.Uint64: - return &uint64Codec{}, nil - case reflect.Float32: - return &float32Codec{}, nil - case reflect.Float64: - return &float64Codec{}, nil - case reflect.Bool: - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return &structEncoder{}, nil - case reflect.Array: - return &arrayEncoder{}, nil - case reflect.Slice: - return &sliceEncoder{}, nil - case reflect.Map: - return &mapEncoder{}, nil - case reflect.Ptr: 
- return &optionalEncoder{}, nil - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - elemType := typ.Elem() - decoder, err := decoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - return &optionalDecoder{elemType, decoder}, nil -} - -func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - elemEncoder, err := encoderOfType(cfg, 
elemType) - if err != nil { - return nil, err - } - encoder := &optionalEncoder{elemEncoder} - if elemType.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return encoder, nil -} - -func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Interface() - return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil -} - -func extractInterface(val interface{}) emptyInterface { - return *((*emptyInterface)(unsafe.Pointer(&val))) -} - -func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - encoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Elem().Interface() - if cfg.sortMapKeys { - return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil - } - return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go deleted file mode 100644 index e23f187b7c..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &arrayDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &arrayEncoder{typ, typ.Elem(), encoder}, nil -} - -type arrayEncoder struct { - arrayType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - // special optimization for interface{} - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteArrayStart() - stream.WriteNil() - stream.WriteArrayEnd() - return - } - elemType := encoder.arrayType.Elem() - if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr 
unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - if offset < decoder.arrayType.Size() { - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) - offset += decoder.elemType.Size() - } else { - iter.Skip() - } - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go deleted file mode 100644 index 74f4b8babb..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ /dev/null @@ -1,413 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - onePtrEmbedded bool - onePtrOptimization bool - Type reflect.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field *reflect.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateDecoder(typ reflect.Type) ValDecoder - CreateEncoder(typ reflect.Type) ValEncoder - DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension: embed this type to get no-op implementations of all Extension methods -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc is the function form of a TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc is the function form of a TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc registers a TypeDecoder for a type using a function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder registers a TypeDecoder for a type -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field using a function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder registers a TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc registers a TypeEncoder for a type using encode/isEmpty functions -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder registers a TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field using encode/isEmpty functions -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder registers a TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder := typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - decoder := typeDecoders[typ.Elem().String()] - if decoder != nil { - return &optionalDecoder{typ.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder := typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - encoder := typeEncoders[typ.Elem().String()] - if encoder != nil { - return &optionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - tag := field.Tag.Get(cfg.getTagKey()) - tagParts := strings.Split(tag, ",") - if tag == "-" { - continue - } - if field.Anonymous && (tag == "" || tagParts[0] == "") { - if field.Type.Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type.Elem()) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &optionalEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - fieldNames := calcFieldNames(field.Name, tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - var err error - decoder, err = decoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - var err error - encoder, err = encoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } - // map is stored as pointer in the struct - if encoder != nil && field.Type.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - } - binding := &Binding{ - Field: &field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil -} -func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - onePtrEmbedded := false - onePtrOptimization := false - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { - onePtrEmbedded = true - } - fallthrough - case reflect.Map: - onePtrOptimization = true - case reflect.Struct: - onePtrOptimization = isStructOnePtr(firstField.Type) - } - } - structDescriptor := &StructDescriptor{ - onePtrEmbedded: onePtrEmbedded, - onePtrOptimization: onePtrOptimization, - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, cfg) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -func isStructOnePtr(typ reflect.Type) bool { - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return isStructOnePtr(firstField.Type) - } - } - return false -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == 
"string" { - if binding.Field.Type.Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? - var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go deleted file mode 100644 index 005671e01b..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_map.go +++ /dev/null @@ -1,244 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "reflect" - "sort" - "strconv" - "unsafe" -) - -type mapDecoder struct { - mapType reflect.Type - keyType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder - mapInterface emptyInterface -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type - mapInterface := decoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface).Elem() - if iter.ReadNil() { - realVal.Set(reflect.Zero(decoder.mapType)) - return - } - if realVal.IsNil() { - realVal.Set(reflect.MakeMap(realVal.Type())) - } - iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { - elem := reflect.New(decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) - // to put into map, we have to use reflection - keyType := decoder.keyType - // TODO: remove this from loop - switch { - case keyType.Kind() == reflect.String: - realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) - return true - case keyType.Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) - return true - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) - return true - default: - switch keyType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowInt(n) { - iter.ReportError("read map key as int64", "read int64 failed") - return false - } - 
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowUint(n) { - iter.ReportError("read map key as uint64", "read uint64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - } - } - iter.ReportError("read map key", "unexpected map key type "+keyType.String()) - return true - }) -} - -type mapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - stream.WriteObjectStart() - for i, key := range realVal.MapKeys() { - if i != 0 { - stream.WriteMore() - } - encodeMapKey(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -func encodeMapKey(key reflect.Value, stream *Stream) { - if key.Kind() == reflect.String { - stream.WriteString(key.String()) - return - } - if tm, ok := key.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - if err != nil { - stream.Error = err - return - } - stream.writeByte('"') - stream.Write(buf) - stream.writeByte('"') - return - } - switch key.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - stream.writeByte('"') - stream.WriteInt64(key.Int()) - stream.writeByte('"') - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - stream.writeByte('"') - stream.WriteUint64(key.Uint()) - stream.writeByte('"') - return - } - stream.Error = &json.UnsupportedTypeError{Type: key.Type()} -} - -func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} - -type sortKeysMapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - - // Extract and sort the keys. 
- keys := realVal.MapKeys() - sv := stringValues(make([]reflectWithString, len(keys))) - for i, v := range keys { - sv[i].v = v - if err := sv[i].resolve(); err != nil { - stream.Error = err - return - } - } - sort.Sort(sv) - - stream.WriteObjectStart() - for i, key := range sv { - if i != 0 { - stream.WriteMore() - } - stream.WriteVal(key.s) // might need html escape, so can not WriteString directly - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key.v).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflectWithString - -type reflectWithString struct { - v reflect.Value - s string -} - -func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() - return nil - } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - w.s = string(buf) - return err - } - switch w.v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) - return nil - } - return &json.UnsupportedTypeError{Type: w.v.Type()} -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } - -func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go deleted file mode 100644 index 95bd1e87cc..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ /dev/null @@ -1,764 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/base64" - "encoding/json" - "reflect" - "unsafe" -) - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type intCodec struct { -} - -func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int)(ptr)) = iter.ReadInt() - } -} - -func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt(*((*int)(ptr))) -} - -func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int)(ptr)) == 0 -} - -type uintptrCodec struct { -} - -func (codec *uintptrCodec) 
Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) - } -} - -func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(uint64(*((*uintptr)(ptr)))) -} - -func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uintptr)(ptr)) == 0 -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uintCodec struct { -} - -func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint)(ptr)) = iter.ReadUint() - return - } -} - -func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint(*((*uint)(ptr))) -} - -func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr 
unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type emptyInterfaceCodec struct { -} - -func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - existing := *((*interface{})(ptr)) - - // Checking for both typed and untyped nil pointers. 
- if existing != nil && - reflect.TypeOf(existing).Kind() == reflect.Ptr && - !reflect.ValueOf(existing).IsNil() { - - var ptrToExisting interface{} - for { - elem := reflect.ValueOf(existing).Elem() - if elem.Kind() != reflect.Ptr || elem.IsNil() { - break - } - ptrToExisting = existing - existing = elem.Interface() - } - - if iter.ReadNil() { - if ptrToExisting != nil { - nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) - reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) - } else { - *((*interface{})(ptr)) = nil - } - } else { - iter.ReadVal(existing) - } - - return - } - - if iter.ReadNil() { - *((*interface{})(ptr)) = nil - } else { - *((*interface{})(ptr)) = iter.Read() - } -} - -func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteVal(*((*interface{})(ptr))) -} - -func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - emptyInterface := (*emptyInterface)(ptr) - return emptyInterface.typ == nil -} - -type nonEmptyInterfaceCodec struct { -} - -func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - if nonEmptyInterface.itab == nil { - iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") - return - } - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - iter.ReadVal(&i) - if e.word == nil { - nonEmptyInterface.itab = nil - } - nonEmptyInterface.word = e.word -} - -func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - var i interface{} - if nonEmptyInterface.itab != nil { - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - } - stream.WriteVal(i) -} - -func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - return nonEmptyInterface.word == nil -} - -type anyCodec struct { -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Any)(ptr)) = iter.ReadAny() -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - (*((*Any)(ptr))).WriteTo(stream) -} - -func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { - (val.(Any)).WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - return (*((*Any)(ptr))).Size() == 0 -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.Number)(ptr)))) -} - -func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.Number))) -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct 
{ -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*Number)(ptr)))) -} - -func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(Number))) -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.RawMessage))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(RawMessage))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} - -type base64Codec struct { - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Len = 0 - ptrSlice.Cap = 0 - ptrSlice.Data = nil - return - } - switch iter.WhatIsNext() { - case StringValue: - encoding := base64.StdEncoding - src := iter.SkipAndReturnBytes() - src = src[1 : len(src)-1] - decodedLen := encoding.DecodedLen(len(src)) - dst := make([]byte, decodedLen) - len, err := encoding.Decode(dst, src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - dst = dst[:len] - dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Data = dstSlice.Data - ptrSlice.Cap = dstSlice.Cap - ptrSlice.Len = dstSlice.Len - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { - ptr := extractInterface(val).word - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - 
encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type marshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler, ok := (*realInterface).(json.Marshaler) - if !ok { - stream.WriteVal(nil) - return - } - - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} -func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - stream.WriteString(string(bytes)) 
- } -} - -func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go deleted file mode 100644 index 59b1235c0d..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_object.go +++ /dev/null @@ -1,196 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{}, nil - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - 
} - } -} - -func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - bindings := map[string]*Binding{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - return createStructDecoder(typ, fields) -} - -type structFieldEncoder struct { - field *reflect.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -type structEncoder struct { - onePtrEmbedded bool - onePtrOptimization bool - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() -} - -func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if encoder.onePtrOptimization { - if e.word == nil && encoder.onePtrEmbedded { - stream.WriteObjectStart() - stream.WriteObjectEnd() - return - } - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go deleted file mode 100644 index 7377eec7b3..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ /dev/null @@ -1,149 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - 
decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &sliceDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &sliceEncoder{typ, typ.Elem(), encoder}, nil -} - -type sliceEncoder struct { - sliceType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - slice := (*sliceHeader)(ptr) - if slice.Data == nil { - stream.WriteNil() - return - } - if slice.Len == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(slice.Data) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - for i := 1; i < slice.Len; i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - slice := (*sliceHeader)(ptr) - return slice.Len == 0 -} - -type sliceDecoder struct { - sliceType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -// sliceHeader is a safe version of SliceHeader used within this package. -type sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - slice := (*sliceHeader)(ptr) - if iter.ReadNil() { - slice.Len = 0 - slice.Cap = 0 - slice.Data = nil - return - } - reuseSlice(slice, decoder.sliceType, 4) - slice.Len = 0 - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - growOne(slice, decoder.sliceType, decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) - offset += decoder.elemType.Size() - return true - }) -} - -// growOne grows the slice so that it can hold one more value, allocating - // more capacity if needed. 
-func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { - newLen := slice.Len + 1 - if newLen <= slice.Cap { - slice.Len = newLen - return - } - newCap := slice.Cap - if newCap == 0 { - newCap = 1 - } else { - for newCap < newLen { - if slice.Len < 1024 { - newCap += newCap - } else { - newCap += newCap / 4 - } - } - } - newVal := reflect.MakeSlice(sliceType, newLen, newCap) - dst := unsafe.Pointer(newVal.Pointer()) - // copy old array into new array - originalBytesCount := uintptr(slice.Len) * elementType.Size() - srcPtr := (*[1 << 30]byte)(slice.Data) - dstPtr := (*[1 << 30]byte)(dst) - for i := uintptr(0); i < originalBytesCount; i++ { - dstPtr[i] = srcPtr[i] - } - slice.Data = dst - slice.Len = newLen - slice.Cap = newCap -} - -func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { - if expectedCap <= slice.Cap { - return - } - newVal := reflect.MakeSlice(sliceType, 0, expectedCap) - dst := unsafe.Pointer(newVal.Pointer()) - slice.Data = dst - slice.Cap = expectedCap -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go deleted file mode 100644 index b3417fd73a..0000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ /dev/null @@ -1,916 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { - knownHash := map[int32]struct{}{ - 0: {}, - } - switch len(fields) { - case 0: - return &skipObjectDecoder{typ}, nil - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil - } - case 2: - var fieldHash1 int32 - var fieldHash2 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil - case 3: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil - case 4: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var 
fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4}, nil - case 5: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil - case 6: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil - case 7: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 
*structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7}, nil - case 8: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil - case 9: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if 
known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil - case 10: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldName10 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10}, nil - } - return &generalStructDecoder{typ, fields}, nil -} - -type generalStructDecoder struct { - typ reflect.Type - fields map[string]*structFieldDecoder -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - fieldBytes := iter.readObjectFieldAsBytes() - field := 
*(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder := decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - for iter.nextToken() == ',' { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type skipObjectDecoder struct { - typ reflect.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect.Type - fieldHash int32 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type twoFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type threeFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fourFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - 
decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fiveFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sixFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sevenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - 
decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type eightFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type nineFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type tenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 
*structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder - fieldHash10 int32 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type structFieldDecoder struct { - field *reflect.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go deleted file mode 100644 index 9323848398..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ /dev/null @@ -1,307 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// Stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. -type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - n int - Error error - indention int -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. -func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, bufSize), - n: 0, - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.n = 0 -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return len(stream.buf) - stream.n -} - -// Buffered returns the number of bytes that have been written into the current buffer. 
-func (stream *Stream) Buffered() int { - return stream.n -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf[:stream.n] -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - for len(p) > stream.Available() && stream.Error == nil { - if stream.out == nil { - stream.growAtLeast(len(p)) - } else { - var n int - if stream.Buffered() == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, stream.Error = stream.out.Write(p) - } else { - n = copy(stream.buf[stream.n:], p) - stream.n += n - stream.Flush() - } - nn += n - p = p[n:] - } - } - if stream.Error != nil { - return nn, stream.Error - } - n := copy(stream.buf[stream.n:], p) - stream.n += n - nn += n - return nn, nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - if stream.Error != nil { - return - } - if stream.Available() < 1 { - stream.growAtLeast(1) - } - stream.buf[stream.n] = c - stream.n++ -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 2 { - stream.growAtLeast(2) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.n += 2 -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 3 { - stream.growAtLeast(3) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.n += 3 -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 4 { - stream.growAtLeast(4) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.n += 4 -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 5 { - stream.growAtLeast(5) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.buf[stream.n+4] = c5 - stream.n += 5 -} - -// Flush writes any buffered data to the underlying io.Writer. 
-func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - if stream.n == 0 { - return nil - } - n, err := stream.out.Write(stream.buf[0:stream.n]) - if n < stream.n && err == nil { - err = io.ErrShortWrite - } - if err != nil { - if n > 0 && n < stream.n { - copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) - } - stream.n -= n - stream.Error = err - return err - } - stream.n = 0 - return nil -} - -func (stream *Stream) ensure(minimal int) { - available := stream.Available() - if available < minimal { - stream.growAtLeast(minimal) - } -} - -func (stream *Stream) growAtLeast(minimal int) { - if stream.out != nil { - stream.Flush() - if stream.Available() >= minimal { - return - } - } - toGrow := len(stream.buf) - if toGrow < minimal { - toGrow = minimal - } - newBuf := make([]byte, len(stream.buf)+toGrow) - copy(newBuf, stream.Buffer()) - stream.buf = newBuf -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.ensure(len(s)) - if stream.Error != nil { - return - } - n := copy(stream.buf[stream.n:], s) - stream.n += n -} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeTwoBytes('[', ']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - stream.ensure(toWrite) - for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { - stream.buf[stream.n] = ' ' - stream.n++ - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go 
b/vendor/github.com/json-iterator/go/feature_stream_float.go deleted file mode 100644 index 9a404e11d4..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_float.go +++ /dev/null @@ -1,96 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go deleted file mode 100644 index 7cfd522c10..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_int.go +++ /dev/null @@ -1,320 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(buf []byte, v uint32, n int) int { - start := v >> 24 - if start == 0 { - buf[n] = byte(v >> 16) - n++ - buf[n] = byte(v >> 8) - n++ - } else if start == 1 { - buf[n] = byte(v >> 8) - n++ - } - buf[n] = byte(v) - n++ - return n -} - -func writeBuf(buf []byte, v uint32, n int) { - buf[n] = byte(v >> 16) - buf[n+1] = byte(v >> 8) - buf[n+2] = byte(v) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.ensure(3) - 
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - stream.ensure(4) - n := stream.n - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint8(nval) - } - stream.n = writeFirstBuf(stream.buf, digits[val], n) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - stream.ensure(5) - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) - return - } - r1 := val - q1*1000 - n := writeFirstBuf(stream.buf, digits[q1], stream.n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - stream.ensure(6) - n := stream.n - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint16(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - n = writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - stream.ensure(10) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - stream.ensure(11) - n := stream.n - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint32(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - stream.ensure(20) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) 
- writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - n = writeFirstBuf(stream.buf, digits[q6], n) - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - stream.ensure(20) - n := stream.n - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint64(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - stream.buf[n] = byte(q6 + '0') - n++ - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go deleted file mode 100644 index 334282f05f..0000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_string.go +++ /dev/null @@ -1,396 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML
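
The deleted jsoniter sources above are dense, so a few sketches follow that reconstruct their techniques in plain Go. They illustrate the removed vendored code rather than replace it, and every helper name below (`decodeBytes`, `miniStream`, and so on) is invented for the examples. First, the `base64Codec` removed above accepts a `[]byte` field either as a base64 JSON string or as a plain JSON array of numbers. A minimal stand-alone version of that dual-format decode, using `encoding/json` for the parsing the original does by hand on raw bytes:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// decodeBytes accepts the three input shapes the removed codec handled:
// null, a base64-encoded JSON string, or a plain JSON array of byte values.
func decodeBytes(raw []byte) ([]byte, error) {
	raw = bytes.TrimSpace(raw)
	if len(raw) == 0 || bytes.Equal(raw, []byte("null")) {
		return nil, nil
	}
	switch raw[0] {
	case '"':
		// String form: unquote, then base64-decode, as in the StringValue case.
		var s string
		if err := json.Unmarshal(raw, &s); err != nil {
			return nil, err
		}
		return base64.StdEncoding.DecodeString(s)
	case '[':
		// Array form: the original delegates to its element slice decoder.
		var nums []int
		if err := json.Unmarshal(raw, &nums); err != nil {
			return nil, err
		}
		out := make([]byte, len(nums))
		for i, n := range nums {
			out[i] = byte(n)
		}
		return out, nil
	}
	return nil, fmt.Errorf("decodeBytes: invalid input %q", raw)
}

func main() {
	b, _ := decodeBytes([]byte(`"aGVsbG8="`))
	fmt.Printf("%s\n", b) // hello
	b, _ = decodeBytes([]byte(`[104,105]`))
	fmt.Printf("%s\n", b) // hi
}
```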
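The deleted `sliceDecoder` grows its target one element at a time through `growOne`, doubling capacity below 1024 elements and adding 25% above that, much like the runtime's `append`. A safe-Go sketch of the same policy, without the unsafe byte copying of the original:

```go
package main

import "fmt"

// growOne extends the slice by one element, doubling capacity while the
// slice is small (<1024 elements) and growing by 25% once it is large.
func growOne(s []int) []int {
	newLen := len(s) + 1
	if newLen <= cap(s) {
		return s[:newLen] // room left: just extend the length
	}
	newCap := cap(s)
	if newCap == 0 {
		newCap = 1
	}
	for newCap < newLen {
		if len(s) < 1024 {
			newCap += newCap // double
		} else {
			newCap += newCap / 4 // +25%
		}
	}
	grown := make([]int, newLen, newCap)
	copy(grown, s)
	return grown
}

func main() {
	var s []int
	for i := 0; i < 5; i++ {
		s = growOne(s)
		s[len(s)-1] = i
		fmt.Println(len(s), cap(s), s) // caps go 1, 2, 4, 4, 8
	}
}
```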
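`createStructDecoder`, deleted above, specializes structs of one to ten fields into dedicated decoders that switch on a precomputed hash of each field name, and falls back to the general map-based decoder whenever two names collide or a hash equals the reserved value 0. The sketch below keeps only that dispatch-or-fallback decision; `calcHash` here is FNV-1a over the lowercased name, an assumption for the example rather than jsoniter's actual hash function:

```go
package main

import (
	"fmt"
	"strings"
)

type person struct{ Name, City string }

type fieldDecoder func(p *person, value string)

// calcHash stands in for jsoniter's field hash: any stable int32 hash works,
// as long as collisions are detected at construction time.
func calcHash(s string) int32 {
	h := uint32(2166136261)
	for i := 0; i < len(s); i++ {
		c := s[i]
		if 'A' <= c && c <= 'Z' {
			c += 'a' - 'A'
		}
		h = (h ^ uint32(c)) * 16777619
	}
	return int32(h)
}

// buildDispatch mirrors createStructDecoder's safety check: the reserved
// hash 0 and any duplicate hash force the general name-keyed path, because
// the specialized decoders can only switch on distinct hashes.
func buildDispatch(fields map[string]fieldDecoder) func(key string) fieldDecoder {
	known := map[int32]struct{}{0: {}}
	byHash := map[int32]fieldDecoder{}
	for name, dec := range fields {
		h := calcHash(name)
		if _, collides := known[h]; collides {
			// Collision: fall back to lowercased-name lookup.
			return func(key string) fieldDecoder { return fields[strings.ToLower(key)] }
		}
		known[h] = struct{}{}
		byHash[h] = dec
	}
	// Fast path: hash the incoming key and dispatch directly.
	return func(key string) fieldDecoder { return byHash[calcHash(key)] }
}

func main() {
	dispatch := buildDispatch(map[string]fieldDecoder{
		"name": func(p *person, v string) { p.Name = v },
		"city": func(p *person, v string) { p.City = v },
	})
	var p person
	dispatch("Name")(&p, "Ada")
	dispatch("city")(&p, "London")
	fmt.Printf("%+v\n", p) // {Name:Ada City:London}
}
```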
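The deleted `Stream` buffers output like `bufio.Writer`, with two details worth noting: `Flush` keeps any unwritten tail in the buffer so a retry loses nothing, and a write larger than the remaining space bypasses the buffer entirely when nothing is pending, saving one copy. A minimal reconstruction of that contract:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// miniStream reproduces the removed Stream's buffering behavior in
// isolation, without the JSON-specific write helpers.
type miniStream struct {
	out io.Writer
	buf []byte
	n   int // bytes currently buffered
}

func (s *miniStream) available() int { return len(s.buf) - s.n }

func (s *miniStream) Flush() error {
	if s.n == 0 {
		return nil
	}
	wrote, err := s.out.Write(s.buf[:s.n])
	if err == nil && wrote < s.n {
		err = io.ErrShortWrite
	}
	if err != nil {
		copy(s.buf, s.buf[wrote:s.n]) // keep the unwritten tail for a retry
		s.n -= wrote
		return err
	}
	s.n = 0
	return nil
}

func (s *miniStream) Write(p []byte) (int, error) {
	nn := 0
	for len(p) > s.available() {
		if s.n == 0 {
			// Large write, empty buffer: write directly, skipping the copy.
			w, err := s.out.Write(p)
			return nn + w, err
		}
		w := copy(s.buf[s.n:], p)
		s.n += w
		nn += w
		p = p[w:]
		if err := s.Flush(); err != nil {
			return nn, err
		}
	}
	w := copy(s.buf[s.n:], p)
	s.n += w
	return nn + w, nil
}

func main() {
	var sink bytes.Buffer
	s := &miniStream{out: &sink, buf: make([]byte, 8)}
	s.Write([]byte(`{"a":`))
	s.Write([]byte(`1234567890}`)) // overflows the 8-byte buffer mid-write
	s.Flush()
	fmt.Println(sink.String()) // {"a":1234567890}
}
```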
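`WriteFloat64Lossy`, deleted above, trades precision for speed: it scales the value by 10^6, rounds once, and prints the integer and fractional parts using integer arithmetic only, trimming trailing zeros afterwards. A sketch of the same fixed-point trick; the `0x4ffffff` fast-path bound is taken from the deleted code, and values beyond it fall back to `strconv` as the full `WriteFloat64` path would:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lossyFloat formats val with at most six fractional digits, avoiding the
// general (and slower) float-formatting machinery for small values.
func lossyFloat(val float64) string {
	neg := val < 0
	if neg {
		val = -val
	}
	if val > 0x4ffffff { // too large for the fast path: defer to strconv
		s := strconv.FormatFloat(val, 'f', -1, 64)
		if neg {
			s = "-" + s
		}
		return s
	}
	const exp = 1000000 // six fractional digits
	lval := uint64(val*exp + 0.5) // one rounding step
	s := strconv.FormatUint(lval/exp, 10)
	if neg {
		s = "-" + s
	}
	if frac := lval % exp; frac != 0 {
		// Zero-pad to six digits, then trim the trailing zeros.
		s += "." + strings.TrimRight(fmt.Sprintf("%06d", frac), "0")
	}
	return s
}

func main() {
	fmt.Println(lossyFloat(3.14159265)) // 3.141593
	fmt.Println(lossyFloat(-0.5))       // -0.5
	fmt.Println(lossyFloat(2))          // 2
}
```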
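Finally, the deleted integer writers avoid `strconv` by precomputing a table that packs the three ASCII digits of each value 0..999 into one `uint32`, with the top byte recording how many leading zeros to skip, then peeling numbers three digits at a time. The loop below is the rolled-up form of what `WriteUint32`/`WriteUint64` unroll by hand, one branch per group:

```go
package main

import "fmt"

// digits[i] packs the three ASCII digits of i into bytes 2..0 of a uint32;
// the top byte is 2 for i<10 and 1 for i<100, i.e. the leading-zero count.
var digits [1000]uint32

func init() {
	for i := uint32(0); i < 1000; i++ {
		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
		if i < 10 {
			digits[i] += 2 << 24
		} else if i < 100 {
			digits[i] += 1 << 24
		}
	}
}

// writeFirstGroup emits the most significant group without leading zeros,
// consulting the skip count stored in the table's top byte.
func writeFirstGroup(buf []byte, v uint32) []byte {
	switch v >> 24 {
	case 0:
		buf = append(buf, byte(v>>16), byte(v>>8))
	case 1:
		buf = append(buf, byte(v>>8))
	}
	return append(buf, byte(v))
}

// writeGroup emits a full, zero-padded three-digit group.
func writeGroup(buf []byte, v uint32) []byte {
	return append(buf, byte(v>>16), byte(v>>8), byte(v))
}

// formatUint peels three digits per division by 1000, then emits the groups
// from most to least significant. The removed code unrolls this loop so no
// intermediate slice is allocated; the algorithm is identical.
func formatUint(val uint64) string {
	if val == 0 {
		return "0"
	}
	var groups []uint32
	for val > 0 {
		groups = append(groups, uint32(val%1000))
		val /= 1000
	}
	buf := writeFirstGroup(nil, digits[groups[len(groups)-1]])
	for i := len(groups) - 2; i >= 0; i-- {
		buf = writeGroup(buf, digits[groups[i]])
	}
	return string(buf)
}

func main() {
	fmt.Println(formatUint(7), formatUint(42), formatUint(1234567)) // 7 42 1234567
}
```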