From 4281606105cb824f1930286bc0bca0fd73dcea3d Mon Sep 17 00:00:00 2001 From: Peter Schultz Date: Thu, 17 May 2018 10:56:34 +0200 Subject: [PATCH 01/44] Make VaultSource compatible with KV Backend V2 Detect if a KV backend uses the new v2 versioning features and rewrite request paths and bodies if necessary. The new API uses additional /data/ and /metadata/ for GET/PUT and LIST operations, respectively. To facilitate versioning, v2 wraps the actual payload in a JSON object with "data" and "metadata" keys: { "data": {}, "metadata": { } } --- cert/source_test.go | 30 +++++++++++++- cert/vault_source.go | 98 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 120 insertions(+), 8 deletions(-) diff --git a/cert/source_test.go b/cert/source_test.go index 41943a13f..30c407f66 100644 --- a/cert/source_test.go +++ b/cert/source_test.go @@ -339,6 +339,15 @@ func vaultServer(t *testing.T, addr, rootToken string) (*exec.Cmd, *vaultapi.Cli capabilities = ["read"] } + # Vault >= 0.10. (KV Version 2) + path "secret/metadata/fabio/cert/" { + capabilities = ["list"] + } + + path "secret/data/fabio/cert/*" { + capabilities = ["read"] + } + path "test-pki/issue/fabio" { capabilities = ["update"] } @@ -425,7 +434,26 @@ func TestVaultSource(t *testing.T) { // create a cert and store it in vault certPEM, keyPEM := makePEM("localhost", time.Minute) data := map[string]interface{}{"cert": string(certPEM), "key": string(keyPEM)} - if _, err := client.Logical().Write(certPath+"/localhost", data); err != nil { + + var nilSource *VaultSource // for calling helper methods + + mountPath, v2, err := nilSource.isKVv2(certPath, client) + if err != nil { + t.Fatal(err) + } + + p := certPath + "/localhost" + if v2 { + t.Log("Vault: KV backend: V2") + data = map[string]interface{}{ + "data": data, + "options": map[string]interface{}{}, + } + p = nilSource.addPrefixToVKVPath(p, mountPath, "data") + } else { + t.Log("Vault: KV backend: V1") + } + if _, err := client.Logical().Write(p, data); err != nil { t.Fatalf("logical.Write failed: %s", err) } diff --git a/cert/vault_source.go b/cert/vault_source.go index a847ad2e8..be326202f 100644 --- a/cert/vault_source.go +++ b/cert/vault_source.go @@ -5,6 +5,8 @@ import ( "crypto/x509" "fmt" "log" + "path" + "strings" "time" "github.com/hashicorp/vault/api" @@ -47,8 +49,20 @@ func (s *VaultSource) load(path string) (pemBlocks map[string][]byte, err error) // they are recognized by the post-processing function // which assembles the certificates. // The value can be stored either as string or []byte. - get := func(name, typ string, secret *api.Secret) { - v := secret.Data[typ] + get := func(name, typ string, secret *api.Secret, v2 bool) { + data := secret.Data + if v2 { + x, ok := secret.Data["data"] + if !ok { + return + } + data, ok = x.(map[string]interface{}) + if !ok { + return + } + } + + v := data[typ] if v == nil { return } @@ -72,9 +86,19 @@ func (s *VaultSource) load(path string) (pemBlocks map[string][]byte, err error) return nil, fmt.Errorf("vault: client: %s", err) } + mountPath, v2, err := s.isKVv2(path, c) + if err != nil { + return nil, fmt.Errorf("vault: query mount path: %s", err) + } + // get the subkeys under 'path'. // Each subkey refers to a certificate. 
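+	// With a KV v2 backend the list API lives under <mount>/metadata/<path>
+	// and individual secrets are read from <mount>/data/<path>, so the
+	// request path is rewritten before each call.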
-	certs, err := c.Logical().List(path)
+	p := path
+	if v2 {
+		p = s.addPrefixToVKVPath(p, mountPath, "metadata")
+	}
+
+	certs, err := c.Logical().List(p)
 	if err != nil {
 		return nil, fmt.Errorf("vault: list: %s", err)
 	}
@@ -82,17 +106,77 @@ func (s *VaultSource) load(path string) (pemBlocks map[string][]byte, err error)
 		return nil, nil
 	}

-	for _, s := range certs.Data["keys"].([]interface{}) {
-		name := s.(string)
+	for _, x := range certs.Data["keys"].([]interface{}) {
+		name := x.(string)
 		p := path + "/" + name
+		if v2 {
+			p = s.addPrefixToVKVPath(p, mountPath, "data")
+		}
 		secret, err := c.Logical().Read(p)
 		if err != nil {
 			log.Printf("[WARN] cert: Failed to read %s from Vault: %s", p, err)
 			continue
 		}

-		get(name, "cert", secret)
-		get(name, "key", secret)
+		get(name, "cert", secret, v2)
+		get(name, "key", secret, v2)
 	}

 	return pemBlocks, nil
 }
+
+func (s *VaultSource) addPrefixToVKVPath(p, mountPath, apiPrefix string) string {
+	p = strings.TrimPrefix(p, mountPath)
+	return path.Join(mountPath, apiPrefix, p)
+}
+
+func (s *VaultSource) isKVv2(path string, client *api.Client) (string, bool, error) {
+	mountPath, version, err := s.kvPreflightVersionRequest(client, path)
+	if err != nil {
+		return "", false, err
+	}
+
+	return mountPath, version == 2, nil
+}
+
+func (s *VaultSource) kvPreflightVersionRequest(client *api.Client, path string) (string, int, error) {
+	r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path)
+	resp, err := client.RawRequest(r)
+	if resp != nil {
+		defer resp.Body.Close()
+	}
+	if err != nil {
+		// If we get a 404 we are using an older version of vault, default to
+		// version 1
+		if resp != nil && resp.StatusCode == 404 {
+			return "", 1, nil
+		}
+
+		return "", 0, err
+	}
+
+	secret, err := api.ParseSecret(resp.Body)
+	if err != nil {
+		return "", 0, err
+	}
+	var mountPath string
+	if mountPathRaw, ok := secret.Data["path"]; ok {
+		mountPath = mountPathRaw.(string)
+	}
+	options := secret.Data["options"]
+	if options == nil {
+		return mountPath, 1, nil
+	}
+	versionRaw := options.(map[string]interface{})["version"]
+	if versionRaw == nil {
+		return mountPath, 1, nil
+	}
+	version := versionRaw.(string)
+	switch version {
+	case "", "1":
+		return mountPath, 1, nil
+	case "2":
+		return mountPath, 2, nil
+	}

+	return mountPath, 1, nil
+}

From ac2a9a2cb267c340e72826b6fb5f762e681052c9 Mon Sep 17 00:00:00 2001
From: Peter Schultz
Date: Thu, 17 May 2018 17:30:44 +0200
Subject: [PATCH 02/44] Fix Consul readiness check

Consul added support for compressed HTTP responses in 1.0.7, which means
we can no longer rely on the Content-Length response header when checking
whether the server is ready yet.
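The probe now drains the response and counts the decompressed bytes
instead; condensed from the cert/source_test.go hunk below:

```go
resp, err := http.Get("http://127.0.0.1:8500/v1/status/leader")
if err != nil {
	return false
}
defer resp.Body.Close()

if resp.StatusCode != 200 {
	return false
}

// Count the decompressed bytes rather than trusting Content-Length,
// which may be absent or reflect the compressed size.
n, err := io.Copy(ioutil.Discard, resp.Body)
return err == nil && n > 10
```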
--- Makefile | 4 ++-- cert/source_test.go | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 6d2ff11ba..3a9ca9421 100644 --- a/Makefile +++ b/Makefile @@ -27,8 +27,8 @@ GOVENDOR = $(shell which govendor) VENDORFMT = $(shell which vendorfmt) # pin versions for CI builds -CI_CONSUL_VERSION=1.0.6 -CI_VAULT_VERSION=0.9.6 +CI_CONSUL_VERSION=1.1.0 +CI_VAULT_VERSION=0.10.1 CI_GO_VERSION=1.10.2 # all is the default target diff --git a/cert/source_test.go b/cert/source_test.go index 30c407f66..32c27442d 100644 --- a/cert/source_test.go +++ b/cert/source_test.go @@ -9,6 +9,7 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "io" "io/ioutil" "log" "math/big" @@ -250,8 +251,18 @@ func TestConsulSource(t *testing.T) { resp, err := http.Get("http://127.0.0.1:8500/v1/status/leader") // /v1/status/leader returns '\n""' while consul is in leader election mode // and '"127.0.0.1:8300"' when not. So we punt by checking the - // Content-Length header instead of the actual body content :) - return err == nil && resp.StatusCode == 200 && resp.ContentLength > 10 + // body length instead of the actual body content :) + if err != nil { + return false + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return false + } + + n, err := io.Copy(ioutil.Discard, resp.Body) + return err == nil && n > 10 } // We need give consul ~8-10 seconds to become ready until I've From 26c555ef9e5e39650e59c48bfebe16b16730dac9 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Wed, 15 Aug 2018 13:10:18 -0500 Subject: [PATCH 03/44] Issue #530 - Updated Vendored github.com/rcrowley/go-metrics package to version - revision:e2704e165165ec55d062f5919b4b29494e9fa790 --- .../github.com/rcrowley/go-metrics/README.md | 65 +++++++-- vendor/github.com/rcrowley/go-metrics/ewma.go | 46 ++++-- .../github.com/rcrowley/go-metrics/gauge.go | 36 +++++ .../rcrowley/go-metrics/gauge_float64.go | 52 +++++-- vendor/github.com/rcrowley/go-metrics/json.go | 62 +------- vendor/github.com/rcrowley/go-metrics/log.go | 36 +++-- .../github.com/rcrowley/go-metrics/meter.go | 114 +++++++++------ .../rcrowley/go-metrics/registry.go | 137 +++++++++++++++++- .../github.com/rcrowley/go-metrics/runtime.go | 64 ++++---- .../go-metrics/runtime_gccpufraction.go | 9 ++ .../go-metrics/runtime_no_gccpufraction.go | 9 ++ .../github.com/rcrowley/go-metrics/sample.go | 9 +- .../github.com/rcrowley/go-metrics/timer.go | 18 +++ .../rcrowley/go-metrics/validate.sh | 2 +- vendor/vendor.json | 2 +- 15 files changed, 477 insertions(+), 184 deletions(-) create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index e36b9dcbb..b7356b5fc 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -21,6 +21,9 @@ g := metrics.NewGauge() metrics.Register("bar", g) g.Update(47) +r := NewRegistry() +g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) + s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) h := metrics.NewHistogram(s) metrics.Register("baz", h) @@ -36,10 +39,29 @@ t.Time(func() {}) t.Update(47) ``` +Register() is not threadsafe. 
For threadsafe metric registration use +GetOrRegister: + +```go +t := metrics.GetOrRegisterTimer("account.create.latency", nil) +t.Time(func() {}) +t.Update(47) +``` + +**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will +leak memory: + +```go +// Will call Stop() on the Meter to allow for garbage collection +metrics.Unregister("quux") +// Or similarly for a Timer that embeds a Meter +metrics.Unregister("bang") +``` + Periodically log every metric in human-readable form to standard error: ```go -go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) +go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) ``` Periodically log every metric in slightly-more-parseable form to syslog: @@ -67,14 +89,15 @@ issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and [#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. ```go -import "github.com/rcrowley/go-metrics/influxdb" - -go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ - Host: "127.0.0.1:8086", - Database: "metrics", - Username: "test", - Password: "test", -}) +import "github.com/vrischmann/go-metrics-influxdb" + +go influxdb.InfluxDB(metrics.DefaultRegistry, + 10e9, + "127.0.0.1:8086", + "database-name", + "username", + "password" +) ``` Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): @@ -103,6 +126,19 @@ import "github.com/rcrowley/go-metrics/stathat" go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") ``` +Maintain all metrics along with expvars at `/debug/metrics`: + +This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) +but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars +as well as all your go-metrics. + + +```go +import "github.com/rcrowley/go-metrics/exp" + +exp.Exp(metrics.DefaultRegistry) +``` + Installation ------------ @@ -121,5 +157,12 @@ Publishing Metrics Clients are available for the following destinations: -* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato) -* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite) +* Librato - https://github.com/mihasya/go-metrics-librato +* Graphite - https://github.com/cyberdelia/go-metrics-graphite +* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb +* Ganglia - https://github.com/appscode/metlia +* Prometheus - https://github.com/deathowl/go-metrics-prometheus +* DataDog - https://github.com/syntaqx/go-metrics-datadog +* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx +* Honeycomb - https://github.com/getspine/go-metrics-honeycomb +* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go index 694a1d033..a8183dd7e 100644 --- a/vendor/github.com/rcrowley/go-metrics/ewma.go +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -79,16 +79,15 @@ func (NilEWMA) Update(n int64) {} type StandardEWMA struct { uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment alpha float64 - rate float64 - init bool + rate uint64 + init uint32 mutex sync.Mutex } // Rate returns the moving average rate of events per second. 
func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) + return currentRate } // Snapshot returns a read-only copy of the EWMA. @@ -99,17 +98,38 @@ func (a *StandardEWMA) Snapshot() EWMA { // Tick ticks the clock to update the moving average. It assumes it is called // every five seconds. func (a *StandardEWMA) Tick() { + // Optimization to avoid mutex locking in the hot-path. + if atomic.LoadUint32(&a.init) == 1 { + a.updateRate(a.fetchInstantRate()) + } else { + // Slow-path: this is only needed on the first Tick() and preserves transactional updating + // of init and rate in the else block. The first conditional is needed below because + // a different thread could have set a.init = 1 between the time of the first atomic load and when + // the lock was acquired. + a.mutex.Lock() + if atomic.LoadUint32(&a.init) == 1 { + // The fetchInstantRate() uses atomic loading, which is unecessary in this critical section + // but again, this section is only invoked on the first successful Tick() operation. + a.updateRate(a.fetchInstantRate()) + } else { + atomic.StoreUint32(&a.init, 1) + atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) + } + a.mutex.Unlock() + } +} + +func (a *StandardEWMA) fetchInstantRate() float64 { count := atomic.LoadInt64(&a.uncounted) atomic.AddInt64(&a.uncounted, -count) instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } + return instantRate +} + +func (a *StandardEWMA) updateRate(instantRate float64) { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) + currentRate += a.alpha * (instantRate - currentRate) + atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) } // Update adds n uncounted events. diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go index 807638a31..cb57a9388 100644 --- a/vendor/github.com/rcrowley/go-metrics/gauge.go +++ b/vendor/github.com/rcrowley/go-metrics/gauge.go @@ -36,6 +36,24 @@ func NewRegisteredGauge(name string, r Registry) Gauge { return c } +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGauge(f func() int64) Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &FunctionalGauge{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { + c := NewFunctionalGauge(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + // GaugeSnapshot is a read-only copy of another Gauge. type GaugeSnapshot int64 @@ -82,3 +100,21 @@ func (g *StandardGauge) Update(v int64) { func (g *StandardGauge) Value() int64 { return atomic.LoadInt64(&g.value) } + +// FunctionalGauge returns value from given function +type FunctionalGauge struct { + value func() int64 +} + +// Value returns the gauge's current value. +func (g FunctionalGauge) Value() int64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } + +// Update panics. 
+func (FunctionalGauge) Update(int64) { + panic("Update called on a FunctionalGauge") +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go index 47c3566c2..3962e6db0 100644 --- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go +++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go @@ -1,6 +1,9 @@ package metrics -import "sync" +import ( + "math" + "sync/atomic" +) // GaugeFloat64s hold a float64 value that can be set arbitrarily. type GaugeFloat64 interface { @@ -38,6 +41,24 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { return c } +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &FunctionalGaugeFloat64{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { + c := NewFunctionalGaugeFloat64(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + // GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. type GaugeFloat64Snapshot float64 @@ -67,8 +88,7 @@ func (NilGaugeFloat64) Value() float64 { return 0.0 } // StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses // sync.Mutex to manage a single float64 value. type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 + value uint64 } // Snapshot returns a read-only copy of the gauge. @@ -78,14 +98,28 @@ func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { // Update updates the gauge's value. func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v + atomic.StoreUint64(&g.value, math.Float64bits(v)) } // Value returns the gauge's current value. func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value + return math.Float64frombits(atomic.LoadUint64(&g.value)) +} + +// FunctionalGaugeFloat64 returns value from given function +type FunctionalGaugeFloat64 struct { + value func() float64 +} + +// Value returns the gauge's current value. +func (g FunctionalGaugeFloat64) Value() float64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } + +// Update panics. +func (FunctionalGaugeFloat64) Update(float64) { + panic("Update called on a FunctionalGaugeFloat64") } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go index 2676aeea5..174b9477e 100644 --- a/vendor/github.com/rcrowley/go-metrics/json.go +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -9,63 +9,7 @@ import ( // MarshalJSON returns a byte slice containing a JSON representation of all // the metrics in the Registry. 
func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return json.Marshal(data) + return json.Marshal(r.GetAll()) } // WriteJSON writes metrics from the given registry periodically to the @@ -81,3 +25,7 @@ func WriteJSON(r Registry, d time.Duration, w io.Writer) { func WriteJSONOnce(r Registry, w io.Writer) { json.NewEncoder(w).Encode(r) } + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.GetAll()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go index 278a8a441..f8074c045 100644 --- a/vendor/github.com/rcrowley/go-metrics/log.go +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -1,14 +1,24 @@ package metrics import ( - "log" "time" ) +type Logger interface { + Printf(format string, v ...interface{}) +} + +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + // Output each metric in the given registry periodically using the given -// logger. -func Log(r Registry, d time.Duration, l *log.Logger) { - for _ = range time.Tick(d) { +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range time.Tick(freq) { r.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: @@ -51,15 +61,15 @@ func Log(r Registry, d time.Duration, l *log.Logger) { ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) l.Printf("timer %s\n", name) l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %9d\n", t.Min()) - l.Printf(" max: %9d\n", t.Max()) - l.Printf(" mean: %12.2f\n", t.Mean()) - l.Printf(" stddev: %12.2f\n", t.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go index 0389ab0b8..7807406a3 100644 --- a/vendor/github.com/rcrowley/go-metrics/meter.go +++ b/vendor/github.com/rcrowley/go-metrics/meter.go @@ -1,7 +1,9 @@ package metrics import ( + "math" "sync" + "sync/atomic" "time" ) @@ -15,10 +17,13 @@ type Meter interface { Rate15() float64 RateMean() float64 Snapshot() Meter + Stop() } // GetOrRegisterMeter returns an existing Meter or constructs and registers a // new StandardMeter. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func GetOrRegisterMeter(name string, r Registry) Meter { if nil == r { r = DefaultRegistry @@ -27,6 +32,7 @@ func GetOrRegisterMeter(name string, r Registry) Meter { } // NewMeter constructs a new StandardMeter and launches a goroutine. +// Be sure to call Stop() once the meter is of no use to allow for garbage collection. func NewMeter() Meter { if UseNilMetrics { return NilMeter{} @@ -34,7 +40,7 @@ func NewMeter() Meter { m := newStandardMeter() arbiter.Lock() defer arbiter.Unlock() - arbiter.meters = append(arbiter.meters, m) + arbiter.meters[m] = struct{}{} if !arbiter.started { arbiter.started = true go arbiter.tick() @@ -44,6 +50,8 @@ func NewMeter() Meter { // NewMeter constructs and registers a new StandardMeter and launches a // goroutine. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func NewRegisteredMeter(name string, r Registry) Meter { c := NewMeter() if nil == r { @@ -56,7 +64,7 @@ func NewRegisteredMeter(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { count int64 - rate1, rate5, rate15, rateMean float64 + rate1, rate5, rate15, rateMean uint64 } // Count returns the count of events at the time the snapshot was taken. @@ -69,23 +77,26 @@ func (*MeterSnapshot) Mark(n int64) { // Rate1 returns the one-minute moving average rate of events per second at the // time the snapshot was taken. 
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } +func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) } // Rate5 returns the five-minute moving average rate of events per second at // the time the snapshot was taken. -func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } +func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) } // Rate15 returns the fifteen-minute moving average rate of events per second // at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } +func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) } // RateMean returns the meter's mean rate of events per second at the time the // snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } +func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) } // Snapshot returns the snapshot. func (m *MeterSnapshot) Snapshot() Meter { return m } +// Stop is a no-op. +func (m *MeterSnapshot) Stop() {} + // NilMeter is a no-op Meter. type NilMeter struct{} @@ -110,12 +121,17 @@ func (NilMeter) RateMean() float64 { return 0.0 } // Snapshot is a no-op. func (NilMeter) Snapshot() Meter { return NilMeter{} } +// Stop is a no-op. +func (NilMeter) Stop() {} + // StandardMeter is the standard implementation of a Meter. type StandardMeter struct { - lock sync.RWMutex + // Only used on stop. + lock sync.Mutex snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time + stopped uint32 } func newStandardMeter() *StandardMeter { @@ -128,19 +144,32 @@ func newStandardMeter() *StandardMeter { } } +// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. +func (m *StandardMeter) Stop() { + m.lock.Lock() + stopped := m.stopped + m.stopped = 1 + m.lock.Unlock() + if stopped != 1 { + arbiter.Lock() + delete(arbiter.meters, m) + arbiter.Unlock() + } +} + // Count returns the number of events recorded. func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count + return atomic.LoadInt64(&m.snapshot.count) } // Mark records the occurance of n events. func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - m.snapshot.count += n + if atomic.LoadUint32(&m.stopped) == 1 { + return + } + + atomic.AddInt64(&m.snapshot.count, n) + m.a1.Update(n) m.a5.Update(n) m.a15.Update(n) @@ -149,70 +178,65 @@ func (m *StandardMeter) Mark(n int64) { // Rate1 returns the one-minute moving average rate of events per second. func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1)) } // Rate5 returns the five-minute moving average rate of events per second. func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5)) } // Rate15 returns the fifteen-minute moving average rate of events per second. func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15)) } // RateMean returns the meter's mean rate of events per second. 
func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean)) } // Snapshot returns a read-only copy of the meter. func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := *m.snapshot - m.lock.RUnlock() - return &snapshot + copiedSnapshot := MeterSnapshot{ + count: atomic.LoadInt64(&m.snapshot.count), + rate1: atomic.LoadUint64(&m.snapshot.rate1), + rate5: atomic.LoadUint64(&m.snapshot.rate5), + rate15: atomic.LoadUint64(&m.snapshot.rate15), + rateMean: atomic.LoadUint64(&m.snapshot.rateMean), + } + return &copiedSnapshot } func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() + rate1 := math.Float64bits(m.a1.Rate()) + rate5 := math.Float64bits(m.a5.Rate()) + rate15 := math.Float64bits(m.a15.Rate()) + rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds()) + + atomic.StoreUint64(&m.snapshot.rate1, rate1) + atomic.StoreUint64(&m.snapshot.rate5, rate5) + atomic.StoreUint64(&m.snapshot.rate15, rate15) + atomic.StoreUint64(&m.snapshot.rateMean, rateMean) } func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() m.a1.Tick() m.a5.Tick() m.a15.Tick() m.updateSnapshot() } +// meterArbiter ticks meters every 5s from a single goroutine. +// meters are references in a set for future stopping. type meterArbiter struct { sync.RWMutex started bool - meters []*StandardMeter + meters map[*StandardMeter]struct{} ticker *time.Ticker } -var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} +var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} // Ticks meters on the scheduled interval func (ma *meterArbiter) tick() { @@ -227,7 +251,7 @@ func (ma *meterArbiter) tick() { func (ma *meterArbiter) tickMeters() { ma.RLock() defer ma.RUnlock() - for _, meter := range ma.meters { + for meter := range ma.meters { meter.tick() } } diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go index a3b26eba5..b3bab64e1 100644 --- a/vendor/github.com/rcrowley/go-metrics/registry.go +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -3,6 +3,7 @@ package metrics import ( "fmt" "reflect" + "strings" "sync" ) @@ -28,6 +29,9 @@ type Registry interface { // Get the metric by the given name or nil if none is registered. Get(string) interface{} + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + // Gets an existing metric or registers the given one. // The interface can be the metric to register if not found in registry, // or a function returning the metric for lazy instantiation. @@ -50,7 +54,7 @@ type Registry interface { // of names to metrics. type StandardRegistry struct { metrics map[string]interface{} - mutex sync.Mutex + mutex sync.RWMutex } // Create a new registry. @@ -67,8 +71,8 @@ func (r *StandardRegistry) Each(f func(string, interface{})) { // Get the metric by the given name or nil if none is registered. 
func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() + r.mutex.RLock() + defer r.mutex.RUnlock() return r.metrics[name] } @@ -77,6 +81,15 @@ func (r *StandardRegistry) Get(name string) interface{} { // The interface can be the metric to register if not found in registry, // or a function returning the metric for lazy instantiation. func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + // access the read lock first which should be re-entrant + r.mutex.RLock() + metric, ok := r.metrics[name] + r.mutex.RUnlock() + if ok { + return metric + } + + // only take the write lock if we'll be modifying the metrics map r.mutex.Lock() defer r.mutex.Unlock() if metric, ok := r.metrics[name]; ok { @@ -99,8 +112,8 @@ func (r *StandardRegistry) Register(name string, i interface{}) error { // Run all registered healthchecks. func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() + r.mutex.RLock() + defer r.mutex.RUnlock() for _, i := range r.metrics { if h, ok := i.(Healthcheck); ok { h.Check() @@ -108,10 +121,72 @@ func (r *StandardRegistry) RunHealthchecks() { } } +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + // Unregister the metric with the given name. func (r *StandardRegistry) Unregister(name string) { r.mutex.Lock() defer r.mutex.Unlock() + r.stop(name) delete(r.metrics, name) } @@ -120,6 +195,7 @@ func (r *StandardRegistry) UnregisterAll() { r.mutex.Lock() defer r.mutex.Unlock() for name, _ := range r.metrics { + r.stop(name) delete(r.metrics, name) } } @@ -145,6 +221,19 @@ func (r *StandardRegistry) registered() map[string]interface{} { return metrics } +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. 
+type Stoppable interface { + Stop() +} + type PrefixedRegistry struct { underlying Registry prefix string @@ -157,14 +246,43 @@ func NewPrefixedRegistry(prefix string) Registry { } } +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + // Call the given function for each registered metric. func (r *PrefixedRegistry) Each(fn func(string, interface{})) { - r.underlying.Each(fn) + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" } // Get the metric by the given name or nil if none is registered. func (r *PrefixedRegistry) Get(name string) interface{} { - return r.underlying.Get(name) + realName := r.prefix + name + return r.underlying.Get(realName) } // Gets an existing metric or registers the given one. @@ -186,6 +304,11 @@ func (r *PrefixedRegistry) RunHealthchecks() { r.underlying.RunHealthchecks() } +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + // Unregister the metric with the given name. The name will be prefixed. func (r *PrefixedRegistry) Unregister(name string) { realName := r.prefix + name diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go index 82574bf25..11c6b785a 100644 --- a/vendor/github.com/rcrowley/go-metrics/runtime.go +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -2,6 +2,7 @@ package metrics import ( "runtime" + "runtime/pprof" "time" ) @@ -9,35 +10,37 @@ var ( memStats runtime.MemStats runtimeMetrics struct { MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge } NumCgoCall Gauge NumGoroutine Gauge + NumThread Gauge ReadMemStats Timer } frees uint64 @@ -45,6 +48,8 @@ var ( mallocs uint64 numGC uint32 numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") ) // Capture new values for the Go runtime statistics exported in @@ -97,6 +102,7 @@ func CaptureRuntimeMemStatsOnce(r Registry) { runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) 
runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) // i := numGC % uint32(len(memStats.PauseNs)) @@ -132,6 +138,8 @@ func CaptureRuntimeMemStatsOnce(r Registry) { numCgoCalls = currentNumCgoCalls runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) } // Register runtimeMetrics for the Go runtime statistics exported in runtime and @@ -158,6 +166,7 @@ func RegisterRuntimeMemStats(r Registry) { runtimeMetrics.MemStats.MSpanSys = NewGauge() runtimeMetrics.MemStats.NextGC = NewGauge() runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) runtimeMetrics.MemStats.PauseTotalNs = NewGauge() runtimeMetrics.MemStats.StackInuse = NewGauge() @@ -166,6 +175,7 @@ func RegisterRuntimeMemStats(r Registry) { runtimeMetrics.MemStats.TotalAlloc = NewGauge() runtimeMetrics.NumCgoCall = NewGauge() runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() runtimeMetrics.ReadMemStats = NewTimer() r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) @@ -188,6 +198,7 @@ func RegisterRuntimeMemStats(r Registry) { r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) @@ -196,5 +207,6 @@ func RegisterRuntimeMemStats(r Registry) { r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) } diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 000000000..ca12c05ba --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 000000000..be96aa6f1 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go index 5f6a37788..fecee5ef6 100644 --- a/vendor/github.com/rcrowley/go-metrics/sample.go +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -33,7 +33,7 @@ type Sample interface { // priority reservoir. See Cormode et al's "Forward Decay: A Practical Time // Decay Model for Streaming Systems". 
// -// +// type ExpDecaySample struct { alpha float64 count int64 @@ -302,6 +302,13 @@ type SampleSnapshot struct { values []int64 } +func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot { + return &SampleSnapshot{ + count: count, + values: values, + } +} + // Clear panics. func (*SampleSnapshot) Clear() { panic("Clear called on a SampleSnapshot") diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go index 17db8f8d2..d6ec4c626 100644 --- a/vendor/github.com/rcrowley/go-metrics/timer.go +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -19,6 +19,7 @@ type Timer interface { RateMean() float64 Snapshot() Timer StdDev() float64 + Stop() Sum() int64 Time(func()) Update(time.Duration) @@ -28,6 +29,8 @@ type Timer interface { // GetOrRegisterTimer returns an existing Timer or constructs and registers a // new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func GetOrRegisterTimer(name string, r Registry) Timer { if nil == r { r = DefaultRegistry @@ -36,6 +39,7 @@ func GetOrRegisterTimer(name string, r Registry) Timer { } // NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. func NewCustomTimer(h Histogram, m Meter) Timer { if UseNilMetrics { return NilTimer{} @@ -47,6 +51,8 @@ func NewCustomTimer(h Histogram, m Meter) Timer { } // NewRegisteredTimer constructs and registers a new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func NewRegisteredTimer(name string, r Registry) Timer { c := NewTimer() if nil == r { @@ -58,6 +64,7 @@ func NewRegisteredTimer(name string, r Registry) Timer { // NewTimer constructs a new StandardTimer using an exponentially-decaying // sample with the same reservoir size and alpha as UNIX load averages. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. func NewTimer() Timer { if UseNilMetrics { return NilTimer{} @@ -112,6 +119,9 @@ func (NilTimer) Snapshot() Timer { return NilTimer{} } // StdDev is a no-op. func (NilTimer) StdDev() float64 { return 0.0 } +// Stop is a no-op. +func (NilTimer) Stop() {} + // Sum is a no-op. func (NilTimer) Sum() int64 { return 0 } @@ -201,6 +211,11 @@ func (t *StandardTimer) StdDev() float64 { return t.histogram.StdDev() } +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + // Sum returns the sum in the sample. func (t *StandardTimer) Sum() int64 { return t.histogram.Sum() @@ -288,6 +303,9 @@ func (t *TimerSnapshot) Snapshot() Timer { return t } // was taken. func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } +// Stop is a no-op. +func (t *TimerSnapshot) Stop() {} + // Sum returns the sum at the time the snapshot was taken. func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh index f6499982e..c4ae91e64 100755 --- a/vendor/github.com/rcrowley/go-metrics/validate.sh +++ b/vendor/github.com/rcrowley/go-metrics/validate.sh @@ -7,4 +7,4 @@ GOFMT_LINES=`gofmt -l . | wc -l | xargs` test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" # run the tests for the root package -go test . +go test -race . 
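A recurring change in this go-metrics update is replacing mutex-guarded
float64 fields (EWMA rates, float64 gauges, meter snapshot rates) with
lock-free atomics: the float64 is stored as its IEEE-754 bit pattern in a
uint64 so that sync/atomic can operate on it. A minimal standalone sketch
of that pattern, separate from the vendored code:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicFloat64 keeps a float64 as its bit pattern so that plain
// uint64 atomics provide lock-free Load and Store.
type atomicFloat64 struct{ bits uint64 }

func (f *atomicFloat64) Store(v float64) {
	atomic.StoreUint64(&f.bits, math.Float64bits(v))
}

func (f *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&f.bits))
}

func main() {
	var rate atomicFloat64
	rate.Store(1.5)
	fmt.Println(rate.Load()) // 1.5
}
```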
diff --git a/vendor/vendor.json b/vendor/vendor.json index 2df73f2ef..dec0970af 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -40,7 +40,7 @@ {"path":"github.com/pascaldekloe/goe/verify","checksumSHA1":"4tr8yNUt5DB8GXc5y+uq6J7TJ54=","revision":"f99183613f483cd9b8c79359d572836e243e0763","revisionTime":"2015-07-19T21:56:08+02:00"}, {"path":"github.com/pkg/profile","checksumSHA1":"C3yiSMdTQxSY3xqKJzMV9T+KnIc=","revision":"5b67d428864e92711fcbd2f8629456121a56d91f","revisionTime":"2017-05-09T09:25:25Z"}, {"path":"github.com/pubnub/go-metrics-statsd","checksumSHA1":"xiXWXI7iNZ4Vkr/ayWU3KsGnRo0=","revision":"7da61f429d6bdaa79d5d5746998e0db9622c56fc","revisionTime":"2017-01-24T01:40:03Z"}, - {"path":"github.com/rcrowley/go-metrics","checksumSHA1":"ODTWX4h8f+DW3oWZFT0yTmfHzdg=","revision":"3e5e593311103d49927c8d2b0fd93ccdfe4a525c","revisionTime":"2015-07-19T09:56:14-07:00"}, + {"path":"github.com/rcrowley/go-metrics","checksumSHA1":"an5RM8wjgPPloolUUYkvEncbHu4=","revision":"e2704e165165ec55d062f5919b4b29494e9fa790","revisionTime":"2018-05-03T17:46:38Z"}, {"path":"github.com/rogpeppe/fastuuid","checksumSHA1":"ehRkDJisGCCSYdNgyvs1gSywSPE=","revision":"6724a57986aff9bff1a1770e9347036def7c89f6","revisionTime":"2015-01-06T09:31:45Z"}, {"path":"github.com/sergi/go-diff/diffmatchpatch","checksumSHA1":"iWCtyR1TkJ22Bi/ygzfKDvOQdQY=","revision":"24e2351369ec4949b2ed0dc5c477afdd4c4034e8","revisionTime":"2017-01-18T13:12:30Z"}, {"path":"golang.org/x/net/http2","checksumSHA1":"N1akwAdrHVfPPrsFOhG2ouP21VA=","revision":"f2499483f923065a842d38eb4c7f1927e6fc6e6d","revisionTime":"2017-01-14T04:22:49Z"}, From 3a32d4e8e81ac587cfd580b9ae5c1bf4ed543dd9 Mon Sep 17 00:00:00 2001 From: Aaron Hurt Date: Thu, 16 Aug 2018 10:36:09 -0500 Subject: [PATCH 04/44] Update CHANGELOG --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4c201872..d448db3de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,12 @@ #### Bug Fixes + * [Issue #530](https://github.com/fabiolb/fabio/issues/530): Memory leak in go-metrics library + + When metrics collection was enabled within fabio instances with very dynamic route changes memory usage quickly + ramped above expected levels. Research done by @galen0624 identified the issue and lead to the discovery of a + fix in an updated version of the go-metrics library used by fabio. + * [Issue #506](https://github.com/fabiolb/fabio/issues/506): Wrong route for multiple matching host glob patterns When multiple host glob patterns match an incoming request fabio can pick the wrong backend for the request. From f7b337da61a7f0f26eac399a899918c05683c0a0 Mon Sep 17 00:00:00 2001 From: Aaron Hurt Date: Thu, 16 Aug 2018 10:39:37 -0500 Subject: [PATCH 05/44] Update CHANGELOG --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d448db3de..75310e3f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,8 +9,8 @@ * [Issue #530](https://github.com/fabiolb/fabio/issues/530): Memory leak in go-metrics library When metrics collection was enabled within fabio instances with very dynamic route changes memory usage quickly - ramped above expected levels. Research done by @galen0624 identified the issue and lead to the discovery of a - fix in an updated version of the go-metrics library used by fabio. + ramped above expected levels. 
Research done by [@galen0624](https://github.com/galen0624) identified the issue
+   and led to the discovery of a fix in an updated version of the go-metrics library used by fabio.

  * [Issue #506](https://github.com/fabiolb/fabio/issues/506): Wrong route for multiple matching host glob patterns

    When multiple host glob patterns match an incoming request fabio can pick the wrong backend for the request.

From 484b9dca20e52ecf4e9f514283477150346d30b3 Mon Sep 17 00:00:00 2001
From: Frank Schroeder
Date: Thu, 16 Aug 2018 17:33:54 +0200
Subject: [PATCH 06/44] Revert "Merge branch 'fcgi-handler' of
 https://github.com/Gufran/fabio"

This reverts commit 0daf269186ef57d374bbd60ac21fe18144a69186, reversing
changes made to 8489b4ead1151a4b9c880933173d4d91930dc55a.
---
 NOTICES.txt                              |   5 -
 config/config.go                         |   9 -
 config/default.go                        |   7 -
 config/load.go                           |   5 -
 docs/content/feature/fastcgi-upstream.md |  14 -
 docs/content/quickstart/_index.md        |   4 -
 docs/content/ref/fcgi.index.md           |  12 -
 docs/content/ref/fcgi.path.split.md      |  12 -
 docs/content/ref/fcgi.root.md            |  11 -
 docs/content/ref/fcgi.timeout.read.md    |  11 -
 docs/content/ref/fcgi.timeout.write.md   |  11 -
 main.go                                  |   2 +-
 proxy/fastcgi/fcgiclient.go              | 587 -----------------------
 proxy/fastcgi/helper_test.go             |  48 --
 proxy/fastcgi/proxy.go                   | 287 -----------
 proxy/fastcgi/proxy_test.go              | 180 -------
 proxy/http_integration_test.go           |  46 +-
 proxy/http_proxy.go                      |  38 +-
 proxy/listen_test.go                     |   1 -
 proxy/ws_integration_test.go             |   4 +-
 20 files changed, 28 insertions(+), 1266 deletions(-)
 delete mode 100644 docs/content/feature/fastcgi-upstream.md
 delete mode 100644 docs/content/ref/fcgi.index.md
 delete mode 100644 docs/content/ref/fcgi.path.split.md
 delete mode 100644 docs/content/ref/fcgi.root.md
 delete mode 100644 docs/content/ref/fcgi.timeout.read.md
 delete mode 100644 docs/content/ref/fcgi.timeout.write.md
 delete mode 100644 proxy/fastcgi/fcgiclient.go
 delete mode 100644 proxy/fastcgi/helper_test.go
 delete mode 100644 proxy/fastcgi/proxy.go
 delete mode 100644 proxy/fastcgi/proxy_test.go

diff --git a/NOTICES.txt b/NOTICES.txt
index 462b87c19..6ead09f39 100644
--- a/NOTICES.txt
+++ b/NOTICES.txt
@@ -157,8 +157,3 @@ golang.org/x/sync/singleflight
 https://golang.org/x/sync/singleflight
 License: BSD 3-clause (https://golang.org/x/sync/LICENSE)
 Copyright (c) 2009 The Go Authors. All rights reserved.
- -github.com/mholt/caddy -https://github.com/mholt/caddy -License: Apache 2.0 (https://github.com/mholt/caddy/LICENSE.txt) -Copyright (c) 2015, Light Code Labs, LLC diff --git a/config/config.go b/config/config.go index d6950e915..f8ad7e4c2 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,6 @@ type Config struct { Metrics Metrics UI UI Runtime Runtime - FastCGI FastCGI ProfileMode string ProfilePath string Insecure bool @@ -145,11 +144,3 @@ type Consul struct { CheckDeregisterCriticalServiceAfter string ChecksRequired string } - -type FastCGI struct { - Index string - Root string - SplitPath string - ReadTimeout time.Duration - WriteTimeout time.Duration -} diff --git a/config/default.go b/config/default.go index 0a10e036e..45f68424d 100644 --- a/config/default.go +++ b/config/default.go @@ -77,11 +77,4 @@ var defaultConfig = &Config{ Color: "light-green", Access: "rw", }, - FastCGI: FastCGI{ - Root: "", - Index: "index.php", - SplitPath: ".php", - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - }, } diff --git a/config/load.go b/config/load.go index 1d2c15e9b..d75188471 100644 --- a/config/load.go +++ b/config/load.go @@ -186,11 +186,6 @@ func load(cmdline, environ, envprefix []string, props *properties.Properties) (c f.StringVar(&cfg.UI.Title, "ui.title", defaultConfig.UI.Title, "optional title for the UI") f.StringVar(&cfg.ProfileMode, "profile.mode", defaultConfig.ProfileMode, "enable profiling mode, one of [cpu, mem, mutex, block]") f.StringVar(&cfg.ProfilePath, "profile.path", defaultConfig.ProfilePath, "path to profile dump file") - f.StringVar(&cfg.FastCGI.Index, "fcgi.index", defaultConfig.FastCGI.Index, "FastCGI index file name") - f.StringVar(&cfg.FastCGI.Root, "fcgi.root", defaultConfig.FastCGI.Root, "Document root of FastCGI upstream") - f.StringVar(&cfg.FastCGI.SplitPath, "fcgi.path.split", defaultConfig.FastCGI.SplitPath, "String literal to split the document path") - f.DurationVar(&cfg.FastCGI.ReadTimeout, "fcgi.timeout.read", defaultConfig.FastCGI.ReadTimeout, "FastCGI request read timeout") - f.DurationVar(&cfg.FastCGI.WriteTimeout, "fcgi.timeout.write", defaultConfig.FastCGI.WriteTimeout, "FastCGI request write timeout") // deprecated flags var proxyLogRoutes string diff --git a/docs/content/feature/fastcgi-upstream.md b/docs/content/feature/fastcgi-upstream.md deleted file mode 100644 index 3532543fa..000000000 --- a/docs/content/feature/fastcgi-upstream.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: "FastCGI Upstream" ---- - -To support FastCGI upstream add `proto=fcgi` option to the `urlprefix-` tag. - -FastCGI upstreams support following configuration options: - - - `index`: Used to specify the index file that should be used if the request URL does not contain a - file. - - `root`: Document root of the FastCGI server. - -Note that `index` and `root` can also be set in Fabio configuration as global default. - diff --git a/docs/content/quickstart/_index.md b/docs/content/quickstart/_index.md index 4bd376679..3bc0f6cd2 100644 --- a/docs/content/quickstart/_index.md +++ b/docs/content/quickstart/_index.md @@ -46,10 +46,6 @@ and you need to add a separate `urlprefix-` tag for every `host/path` prefix the # TCP examples urlprefix-:3306 proto=tcp # route external port 3306 - - # Fast-CGI example - urlprefix-/blog proto=fcgi - urlprefix-/home proto=fcgi strip=/home ``` 5. 
Start fabio without a config file diff --git a/docs/content/ref/fcgi.index.md b/docs/content/ref/fcgi.index.md deleted file mode 100644 index 70f902e0f..000000000 --- a/docs/content/ref/fcgi.index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "fcgi.index" ---- - -`fcgi.index` configures the index file to be used in FastCGI requests if the URL does not contain -it. - -Default value is - -``` -fcgi.index = index.php -``` diff --git a/docs/content/ref/fcgi.path.split.md b/docs/content/ref/fcgi.path.split.md deleted file mode 100644 index dca597b32..000000000 --- a/docs/content/ref/fcgi.path.split.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: "fcgi.path.split" ---- - -`fcgi.path.split` specifies how to split the URL; the split value becomes the end of the first part -and anything in the URL after it becomes part of the `PATH_INFO` CGI variable. - -Default value is - -``` -fcgi.path.split = .php -``` diff --git a/docs/content/ref/fcgi.root.md b/docs/content/ref/fcgi.root.md deleted file mode 100644 index 9e4cc1dbf..000000000 --- a/docs/content/ref/fcgi.root.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "fcgi.root" ---- - -`fcgi.root` sets the document root for FastCGI requests. - -Default value is empty string - -``` -fcgi.root = -``` diff --git a/docs/content/ref/fcgi.timeout.read.md b/docs/content/ref/fcgi.timeout.read.md deleted file mode 100644 index 5b6f109fa..000000000 --- a/docs/content/ref/fcgi.timeout.read.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "fcgi.timeout.read" ---- - -`fcgi.timeout.read` is the time allowed to read a response from upstream. - -Default value is - -``` -fcgi.timeout.read = 10s -``` diff --git a/docs/content/ref/fcgi.timeout.write.md b/docs/content/ref/fcgi.timeout.write.md deleted file mode 100644 index 798d145e8..000000000 --- a/docs/content/ref/fcgi.timeout.write.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "fcgi.timeout.write" ---- - -`fcgi.timeout.write` is the time allowed to upload complete request to upstream. - -Default value is - -``` -fcgi.timeout.write = 10s -``` diff --git a/main.go b/main.go index ad13af032..264c30978 100644 --- a/main.go +++ b/main.go @@ -179,7 +179,7 @@ func newHTTPProxy(cfg *config.Config) http.Handler { } return &proxy.HTTPProxy{ - Config: cfg, + Config: cfg.Proxy, Transport: newTransport(nil), InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}), Lookup: func(r *http.Request) *route.Target { diff --git a/proxy/fastcgi/fcgiclient.go b/proxy/fastcgi/fcgiclient.go deleted file mode 100644 index cf6cc23c9..000000000 --- a/proxy/fastcgi/fcgiclient.go +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Forked ac865e8 on Jan. 2018 from https://github.com/mholt/caddy/blob/master/caddyhttp/fastcgi/fcgiclient.go -// Which is forked Jan. 
2015 from http://bitbucket.org/PinIdea/fcgi_client -// (which is forked from https://code.google.com/p/go-fastcgi-client/) - -// This fork contains several fixes and improvements by Matt Holt and -// other contributors to Caddy project. - -// Copyright 2012 Junqing Tan and The Go Authors -// Use of this source code is governed by a BSD-style -// Part of source code is from Go fcgi package - -package fastcgi - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "mime/multipart" - "net" - "net/http" - "net/http/httputil" - "net/textproto" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -// FCGIListenSockFileno describes listen socket file number. -const FCGIListenSockFileno uint8 = 0 - -// FCGIHeaderLen describes header length. -const FCGIHeaderLen uint8 = 8 - -// Version1 describes the version. -const Version1 uint8 = 1 - -// FCGINullRequestID describes the null request ID. -const FCGINullRequestID uint8 = 0 - -// FCGIKeepConn describes keep connection mode. -const FCGIKeepConn uint8 = 1 - -const ( - // BeginRequest is the begin request flag. - BeginRequest uint8 = iota + 1 - // AbortRequest is the abort request flag. - AbortRequest - // EndRequest is the end request flag. - EndRequest - // Params is the parameters flag. - Params - // Stdin is the standard input flag. - Stdin - // Stdout is the standard output flag. - Stdout - // Stderr is the standard error flag. - Stderr - // Data is the data flag. - Data - // GetValues is the get values flag. - GetValues - // GetValuesResult is the get values result flag. - GetValuesResult - // UnknownType is the unknown type flag. - UnknownType - // MaxType is the maximum type flag. - MaxType = UnknownType -) - -const ( - // Responder is the responder flag. - Responder uint8 = iota + 1 - // Authorizer is the authorizer flag. - Authorizer - // Filter is the filter flag. - Filter -) - -const ( - // RequestComplete is the completed request flag. - RequestComplete uint8 = iota - // CantMultiplexConns is the multiplexed connections flag. - CantMultiplexConns - // Overloaded is the overloaded flag. - Overloaded - // UnknownRole is the unknown role flag. - UnknownRole -) - -const ( - // MaxConns is the maximum connections flag. - MaxConns string = "MAX_CONNS" - // MaxRequests is the maximum requests flag. - MaxRequests string = "MAX_REQS" - // MultiplexConns is the multiplex connections flag. 
- MultiplexConns string = "MPXS_CONNS" -) - -const ( - maxWrite = 65500 // 65530 may work, but for compatibility - maxPad = 255 -) - -type header struct { - Version uint8 - Type uint8 - ID uint16 - ContentLength uint16 - PaddingLength uint8 - Reserved uint8 -} - -// for padding so we don't have to allocate all the time -// not synchronized because we don't care what the contents are -var pad [maxPad]byte - -func (h *header) init(recType uint8, reqID uint16, contentLength int) { - h.Version = 1 - h.Type = recType - h.ID = reqID - h.ContentLength = uint16(contentLength) - h.PaddingLength = uint8(-contentLength & 7) -} - -type record struct { - h header - rbuf []byte -} - -func (rec *record) read(r io.Reader) (buf []byte, err error) { - if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { - return - } - if rec.h.Version != 1 { - err = errors.New("fcgi: invalid header version") - return - } - if rec.h.Type == EndRequest { - err = io.EOF - return - } - n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) - if len(rec.rbuf) < n { - rec.rbuf = make([]byte, n) - } - if _, err = io.ReadFull(r, rec.rbuf[:n]); err != nil { - return - } - buf = rec.rbuf[:int(rec.h.ContentLength)] - - return -} - -// FCGIClient implements a FastCGI client, which is a standard for -// interfacing external applications with Web servers. -type FCGIClient struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - h header - buf bytes.Buffer - stderr bytes.Buffer - keepAlive bool - reqID uint16 - readTimeout time.Duration - sendTimeout time.Duration -} - -// DialWithDialerContext connects to the fcgi responder at the specified network address, using custom net.Dialer -// and a context. -// See func net.Dial for a description of the network and address parameters. -func DialWithDialerContext(ctx context.Context, network, address string, dialer net.Dialer) (fcgi *FCGIClient, err error) { - var conn net.Conn - conn, err = dialer.DialContext(ctx, network, address) - if err != nil { - return - } - - fcgi = &FCGIClient{ - rwc: conn, - keepAlive: false, - reqID: 1, - } - - return -} - -// DialContext is like Dial but passes ctx to dialer.Dial. -func DialContext(ctx context.Context, network, address string) (fcgi *FCGIClient, err error) { - return DialWithDialerContext(ctx, network, address, net.Dialer{}) -} - -// Dial connects to the fcgi responder at the specified network address, using default net.Dialer. -// See func net.Dial for a description of the network and address parameters. 
-func Dial(network, address string) (fcgi *FCGIClient, err error) { - return DialContext(context.Background(), network, address) -} - -// Close closes fcgi connnection -func (c *FCGIClient) Close() { - c.rwc.Close() -} - -func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.buf.Reset() - c.h.init(recType, c.reqID, len(content)) - if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { - return err - } - if _, err := c.buf.Write(content); err != nil { - return err - } - if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { - return err - } - _, err = c.rwc.Write(c.buf.Bytes()) - return err -} - -func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error { - b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(BeginRequest, b[:]) -} - -func (c *FCGIClient) writeEndRequest(appStatus int, protocolStatus uint8) error { - b := make([]byte, 8) - binary.BigEndian.PutUint32(b, uint32(appStatus)) - b[4] = protocolStatus - return c.writeRecord(EndRequest, b) -} - -func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error { - w := newWriter(c, recType) - b := make([]byte, 8) - nn := 0 - for k, v := range pairs { - m := 8 + len(k) + len(v) - if m > maxWrite { - // param data size exceed 65535 bytes" - vl := maxWrite - 8 - len(k) - v = v[:vl] - } - n := encodeSize(b, uint32(len(k))) - n += encodeSize(b[n:], uint32(len(v))) - m = n + len(k) + len(v) - if (nn + m) > maxWrite { - w.Flush() - nn = 0 - } - nn += m - if _, err := w.Write(b[:n]); err != nil { - return err - } - if _, err := w.WriteString(k); err != nil { - return err - } - if _, err := w.WriteString(v); err != nil { - return err - } - } - w.Close() - return nil -} - -func encodeSize(b []byte, size uint32) int { - if size > 127 { - size |= 1 << 31 - binary.BigEndian.PutUint32(b, size) - return 4 - } - b[0] = byte(size) - return 1 -} - -// bufWriter encapsulates bufio.Writer but also closes the underlying stream when -// Closed. -type bufWriter struct { - closer io.Closer - *bufio.Writer -} - -func (w *bufWriter) Close() error { - if err := w.Writer.Flush(); err != nil { - w.closer.Close() - return err - } - return w.closer.Close() -} - -func newWriter(c *FCGIClient, recType uint8) *bufWriter { - s := &streamWriter{c: c, recType: recType} - w := bufio.NewWriterSize(s, maxWrite) - return &bufWriter{s, w} -} - -// streamWriter abstracts out the separation of a stream into discrete records. -// It only writes maxWrite bytes at a time. 
-type streamWriter struct { - c *FCGIClient - recType uint8 -} - -func (w *streamWriter) Write(p []byte) (int, error) { - nn := 0 - for len(p) > 0 { - n := len(p) - if n > maxWrite { - n = maxWrite - } - if err := w.c.writeRecord(w.recType, p[:n]); err != nil { - return nn, err - } - nn += n - p = p[n:] - } - return nn, nil -} - -func (w *streamWriter) Close() error { - // send empty record to close the stream - return w.c.writeRecord(w.recType, nil) -} - -type streamReader struct { - c *FCGIClient - buf []byte -} - -func (w *streamReader) Read(p []byte) (n int, err error) { - - if len(p) > 0 { - if len(w.buf) == 0 { - - // filter outputs for error log - for { - rec := &record{} - var buf []byte - buf, err = rec.read(w.c.rwc) - if err != nil { - return - } - // standard error output - if rec.h.Type == Stderr { - w.c.stderr.Write(buf) - continue - } - w.buf = buf - break - } - } - - n = len(p) - if n > len(w.buf) { - n = len(w.buf) - } - copy(p, w.buf[:n]) - w.buf = w.buf[n:] - } - - return -} - -// Do made the request and returns a io.Reader that translates the data read -// from fcgi responder out of fcgi packet before returning it. -func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) { - err = c.writeBeginRequest(uint16(Responder), 0) - if err != nil { - return - } - - err = c.writePairs(Params, p) - if err != nil { - return - } - - body := newWriter(c, Stdin) - if req != nil { - io.Copy(body, req) - } - body.Close() - - r = &streamReader{c: c} - return -} - -// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer -// that closes FCGIClient connection. -type clientCloser struct { - *FCGIClient - io.Reader -} - -func (f clientCloser) Close() error { return f.rwc.Close() } - -// Request returns a HTTP Response with Header and Body -// from fcgi responder -func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) { - r, err := c.Do(p, req) - if err != nil { - return - } - - rb := bufio.NewReader(r) - tp := textproto.NewReader(rb) - resp = new(http.Response) - - // Parse the response headers. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil && err != io.EOF { - return - } - resp.Header = http.Header(mimeHeader) - - if resp.Header.Get("Status") != "" { - statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2) - resp.StatusCode, err = strconv.Atoi(statusParts[0]) - if err != nil { - return - } - if len(statusParts) > 1 { - resp.Status = statusParts[1] - } - - } else { - resp.StatusCode = http.StatusOK - } - - // TODO: fixTransferEncoding ? - resp.TransferEncoding = resp.Header["Transfer-Encoding"] - resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - - if chunked(resp.TransferEncoding) { - resp.Body = clientCloser{c, httputil.NewChunkedReader(rb)} - } else { - resp.Body = clientCloser{c, ioutil.NopCloser(rb)} - } - return -} - -// Get issues a GET request to the fcgi responder. -func (c *FCGIClient) Get(p map[string]string) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "GET" - p["CONTENT_LENGTH"] = "0" - - return c.Request(p, nil) -} - -// Head issues a HEAD request to the fcgi responder. -func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "HEAD" - p["CONTENT_LENGTH"] = "0" - - return c.Request(p, nil) -} - -// Options issues an OPTIONS request to the fcgi responder. 
-func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "OPTIONS" - p["CONTENT_LENGTH"] = "0" - - return c.Request(p, nil) -} - -// Post issues a POST request to the fcgi responder. with request body -// in the format that bodyType specified -func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) { - if p == nil { - p = make(map[string]string) - } - - p["REQUEST_METHOD"] = strings.ToUpper(method) - - if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" { - p["REQUEST_METHOD"] = "POST" - } - - p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10) - if len(bodyType) > 0 { - p["CONTENT_TYPE"] = bodyType - } else { - p["CONTENT_TYPE"] = "application/x-www-form-urlencoded" - } - - return c.Request(p, body) -} - -// PostForm issues a POST to the fcgi responder, with form -// as a string key to a list values (url.Values) -func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) { - body := bytes.NewReader([]byte(data.Encode())) - return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len())) -} - -// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard, -// with form as a string key to a list values (url.Values), -// and/or with file as a string key to a list file path. -func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) { - buf := &bytes.Buffer{} - writer := multipart.NewWriter(buf) - bodyType := writer.FormDataContentType() - - for key, val := range data { - for _, v0 := range val { - err = writer.WriteField(key, v0) - if err != nil { - return - } - } - } - - for key, val := range file { - fd, e := os.Open(val) - if e != nil { - return nil, e - } - defer fd.Close() - - part, e := writer.CreateFormFile(key, filepath.Base(val)) - if e != nil { - return nil, e - } - _, err = io.Copy(part, fd) - if err != nil { - return - } - } - - err = writer.Close() - if err != nil { - return - } - - return c.Post(p, "POST", bodyType, buf, int64(buf.Len())) -} - -// SetReadTimeout sets the read timeout for future calls that read from the -// fcgi responder. A zero value for t means no timeout will be set. -func (c *FCGIClient) SetReadTimeout(t time.Duration) error { - if conn, ok := c.rwc.(net.Conn); ok && t != 0 { - return conn.SetReadDeadline(time.Now().Add(t)) - } - return nil -} - -// SetSendTimeout sets the read timeout for future calls that send data to -// the fcgi responder. A zero value for t means no timeout will be set. -func (c *FCGIClient) SetSendTimeout(t time.Duration) error { - if conn, ok := c.rwc.(net.Conn); ok && t != 0 { - return conn.SetWriteDeadline(time.Now().Add(t)) - } - return nil -} - -// Checks whether chunked is part of the encodings stack -func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } - -// Stderr returns any error produced by FCGI backend -// while processing the request. 
-func (c *FCGIClient) Stderr() string { - return c.stderr.String() -} diff --git a/proxy/fastcgi/helper_test.go b/proxy/fastcgi/helper_test.go deleted file mode 100644 index ec931b9d5..000000000 --- a/proxy/fastcgi/helper_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package fastcgi - -import ( - "io" - "net/http" - "time" -) - -type staticFcgiBackend struct { - OptionsFunc func(map[string]string) (*http.Response, error) - HeadFunc func(map[string]string) (*http.Response, error) - GetFunc func(map[string]string) (*http.Response, error) - PostFunc func(map[string]string, string, string, io.Reader, int64) (*http.Response, error) - SetReadTimeoutFunc func(time.Duration) error - SetSendTimeoutFunc func(time.Duration) error - StderrFunc func() string - CloseFunc func() -} - -func (b *staticFcgiBackend) Options(params map[string]string) (*http.Response, error) { - return b.OptionsFunc(params) -} - -func (b *staticFcgiBackend) Head(params map[string]string) (*http.Response, error) { - return b.HeadFunc(params) -} - -func (b *staticFcgiBackend) Get(params map[string]string) (*http.Response, error) { - return b.GetFunc(params) -} - -func (b *staticFcgiBackend) SetReadTimeout(dur time.Duration) error { - return b.SetReadTimeoutFunc(dur) -} - -func (b *staticFcgiBackend) SetSendTimeout(dur time.Duration) error { - return b.SetSendTimeoutFunc(dur) -} - -func (b *staticFcgiBackend) Stderr() string { - return b.StderrFunc() -} - -func (b *staticFcgiBackend) Post(params map[string]string, method string, bodyType string, body io.Reader, l int64) (*http.Response, error) { - return b.PostFunc(params, method, bodyType, body, l) -} - -func (b *staticFcgiBackend) Close() {} diff --git a/proxy/fastcgi/proxy.go b/proxy/fastcgi/proxy.go deleted file mode 100644 index 398e26d85..000000000 --- a/proxy/fastcgi/proxy.go +++ /dev/null @@ -1,287 +0,0 @@ -package fastcgi - -import ( - "fmt" - "io" - "log" - "net" - "net/http" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/fabiolb/fabio/config" -) - -var ( - headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_") -) - -type Proxy struct { - root string - index string - stripPrefix string - upstream string - config *config.Config - dialFunc func(string) (FCGIBackend, error) -} - -// FCGIBackend describes the capabilities offered by -// FastCGI bakcned server -type FCGIBackend interface { - // Options proxies HTTP OPTION request to FCGI backend - Options(parameters map[string]string) (resp *http.Response, err error) - - // Head proxies HTTP HEAD request to FCGI backend - Head(parameters map[string]string) (resp *http.Response, err error) - - // Get proxies HTTP GET request to FCGI backend - Get(parameters map[string]string) (resp *http.Response, err error) - - // Post proxies HTTP Post request to FCGI backend - Post(parameters map[string]string, method string, bodyType string, body io.Reader, contentLength int64) (resp *http.Response, err error) - - // SetReadTimeout sets the maximum time duration the - // connection will wait to read full response. If the - // deadline is reached before the response is read in - // full, the client receives a gateway timeout error. - SetReadTimeout(time.Duration) error - - // SetSendTimeout sets the maximum time duration the - // connection will wait to send full request to FCGI backend. - // If the deadline is reached before the request is sent - // completely, the client receives a gateway timeout error. 
- SetSendTimeout(time.Duration) error - - // Stderr returns any error produced by FCGI backend - // while processing the request. - Stderr() string - - // Close closes the connection. - Close() -} - -func NewProxy(cfg *config.Config, upstream string) *Proxy { - return &Proxy{ - root: cfg.FastCGI.Root, - index: cfg.FastCGI.Index, - upstream: upstream, - config: cfg, - dialFunc: Connect, - } -} - -// Connect to FCGI backend -func Connect(upstream string) (FCGIBackend, error) { - return Dial("tcp", upstream) -} - -func (p *Proxy) SetRoot(root string) { - p.root = root -} - -func (p *Proxy) SetStripPathPrefix(prefix string) { - p.stripPrefix = prefix -} - -func (p *Proxy) SetIndex(index string) { - p.index = index -} - -func (p *Proxy) stripPathPrefix(path string) string { - return strings.TrimPrefix(path, p.stripPrefix) -} - -func (p *Proxy) ensureIndexFile(path string) string { - prefix := "" - if strings.HasPrefix(path, "/") { - prefix = "/" - } - - if strings.HasPrefix(path, prefix+p.index) { - return path - } - - return filepath.Join(p.index, path) -} - -func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { - fpath := p.stripPathPrefix(r.URL.Path) - env, err := p.buildEnv(r, p.ensureIndexFile(fpath)) - if err != nil { - log.Printf("[WARN] failed to create fastcgi environment. %s", err) - http.Error(w, "Internal server error", http.StatusInternalServerError) - return - } - - for x, y := range env { - log.Printf("[INFO] >>>> %s => %s", x, y) - } - - fcgiBackend, err := p.dialFunc(p.upstream) - if err != nil { - log.Printf("[WARN] failed to connect with FastCGI upstream. %s", err) - http.Error(w, "Bad Gateway", http.StatusBadGateway) - return - } - defer fcgiBackend.Close() - - if err := fcgiBackend.SetReadTimeout(p.config.FastCGI.ReadTimeout); err != nil { - log.Printf("[ERROR] failed to set connection read timeout. %s", err) - http.Error(w, "Internal server error", http.StatusInternalServerError) - return - } - if err := fcgiBackend.SetSendTimeout(p.config.FastCGI.WriteTimeout); err != nil { - log.Printf("[ERROR] failed to set connection write timeout. %s", err) - http.Error(w, "Internal server error", http.StatusInternalServerError) - return - } - - var resp *http.Response - - contentLength := r.ContentLength - if contentLength == 0 { - contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) - } - - switch r.Method { - case "HEAD": - resp, err = fcgiBackend.Head(env) - case "GET": - resp, err = fcgiBackend.Get(env) - case "OPTIONS": - resp, err = fcgiBackend.Options(env) - default: - resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength) - } - - if resp != nil && resp.Body != nil { - defer resp.Body.Close() - } - - if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - log.Printf("[ERROR] FastCGI upstream timed out during request. %s", err) - http.Error(w, "Gateway Timeout", http.StatusGatewayTimeout) - return - } else if err != io.EOF { - log.Printf("[ERROR] failed to read response from FastCGI upstream. %s", err) - http.Error(w, "Bad Gateway", http.StatusBadGateway) - return - } - } - - writeHeader(w, resp) - - _, err = io.Copy(w, resp.Body) - if err != nil { - log.Printf("[ERROR] failed to write response body. 
%s", err) - http.Error(w, "Bad Gateway", http.StatusBadGateway) - return - } - - if errOut := fcgiBackend.Stderr(); errOut != "" { - log.Printf("[WARN] Error from FastCGI upstream: %s", errOut) - return - } -} - -func writeHeader(w http.ResponseWriter, r *http.Response) { - for key, vals := range r.Header { - for _, val := range vals { - w.Header().Add(key, val) - } - } - w.WriteHeader(r.StatusCode) -} - -func (p Proxy) buildEnv(r *http.Request, fpath string) (map[string]string, error) { - absPath := filepath.Join(p.root, fpath) - - // Separate remote IP and port; more lenient than net.SplitHostPort - var ip, port string - if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 { - ip = r.RemoteAddr[:idx] - port = r.RemoteAddr[idx+1:] - } else { - ip = r.RemoteAddr - } - - username := "" - if r.URL.User != nil { - username = r.URL.User.Username() - } - - // Remove [] from IPv6 addresses - ip = strings.TrimPrefix(ip, "[") - ip = strings.TrimSuffix(ip, "]") - - splitPos := p.splitPos(fpath) - if splitPos == -1 { - return nil, fmt.Errorf("cannot split path on %s", p.config.FastCGI.SplitPath) - } - - // Request has the extension; path was split successfully - docURI := fpath[:splitPos+len(p.config.FastCGI.SplitPath)] - pathInfo := fpath[splitPos+len(p.config.FastCGI.SplitPath):] - scriptName := fpath - scriptFilename := absPath - - // Strip PATH_INFO from SCRIPT_NAME - scriptName = strings.TrimSuffix(scriptName, pathInfo) - - // Some variables are unused but cleared explicitly to prevent - // the parent environment from interfering. - env := map[string]string{ - // Variables defined in CGI 1.1 spec - "AUTH_TYPE": "", // Not used - "CONTENT_LENGTH": r.Header.Get("Content-Length"), - "CONTENT_TYPE": r.Header.Get("Content-Type"), - "GATEWAY_INTERFACE": "CGI/1.1", - "PATH_INFO": pathInfo, - "QUERY_STRING": r.URL.RawQuery, - "REMOTE_ADDR": ip, - "REMOTE_HOST": ip, // For speed, remote host lookups disabled - "REMOTE_PORT": port, - "REMOTE_IDENT": "", // Not used - "REMOTE_USER": username, - "REQUEST_METHOD": r.Method, - "SERVER_NAME": r.URL.Hostname(), - "SERVER_PORT": r.URL.Port(), - "SERVER_PROTOCOL": r.Proto, - "SERVER_SOFTWARE": "fabio", - - // Other variables - "DOCUMENT_ROOT": p.root, - "DOCUMENT_URI": docURI, - "HTTP_HOST": r.Host, // added here, since not always part of headers - "REQUEST_URI": p.stripPathPrefix(r.URL.RequestURI()), - "SCRIPT_FILENAME": scriptFilename, - "SCRIPT_NAME": scriptName, - } - - // compliance with the CGI specification requires that - // PATH_TRANSLATED should only exist if PATH_INFO is defined. 
- // Info: https://www.ietf.org/rfc/rfc3875 Page 14 - if env["PATH_INFO"] != "" { - env["PATH_TRANSLATED"] = filepath.Join(p.root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html - } - - // Some web apps rely on knowing HTTPS or not - if r.TLS != nil { - env["HTTPS"] = "on" - } - - // Add all HTTP headers to env variables - for field, val := range r.Header { - header := strings.ToUpper(field) - header = headerNameReplacer.Replace(header) - env["HTTP_"+header] = strings.Join(val, ", ") - } - return env, nil -} - -func (p *Proxy) splitPos(path string) int { - return strings.Index(strings.ToLower(path), strings.ToLower(p.config.FastCGI.SplitPath)) -} diff --git a/proxy/fastcgi/proxy_test.go b/proxy/fastcgi/proxy_test.go deleted file mode 100644 index 2d8865b8d..000000000 --- a/proxy/fastcgi/proxy_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package fastcgi - -import ( - "crypto/tls" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/fabiolb/fabio/config" -) - -func getBackendDialer(b *staticFcgiBackend) func(string) (FCGIBackend, error) { - return func(u string) (FCGIBackend, error) { - return b, nil - } -} - -type rc struct { - eof error - v []byte -} - -func (r *rc) Close() error { return nil } - -func (r *rc) Read(p []byte) (n int, err error) { - if r.eof != nil { - return 0, r.eof - } - - copy(p, r.v) - r.eof = io.EOF - return len(r.v), nil -} - -func TestServeHTTP(t *testing.T) { - data := struct { - readTimeout time.Duration - sendTimeout time.Duration - env map[string]string - method string - contentType string - body io.Reader - contentLength int64 - }{} - - req, err := http.NewRequest("post", "https://app.host/user/index.php/profile?key=value", strings.NewReader("test request body")) - if err != nil { - t.Error("failed to create new http request", err) - } - req.Header.Add("Content-Length", "17") - req.Header.Add("Content-Type", "text/plain") - - response := http.Response{ - Status: http.StatusText(http.StatusOK), - StatusCode: http.StatusOK, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: http.Header{}, - ContentLength: 10, - TransferEncoding: nil, - Close: true, - Uncompressed: false, - Request: req, - TLS: nil, - Body: &rc{ - v: []byte("successful response"), - }, - } - - backend := &staticFcgiBackend{ - SetReadTimeoutFunc: func(t time.Duration) error { - data.readTimeout = t - return nil - }, - SetSendTimeoutFunc: func(t time.Duration) error { - data.sendTimeout = t - return nil - }, - PostFunc: func(params map[string]string, m string, ct string, b io.Reader, cl int64) (*http.Response, error) { - data.env = params - data.method = m - data.contentType = ct - data.body = b - data.contentLength = cl - return &response, nil - }, - StderrFunc: func() string { return "" }, - } - - proxy := Proxy{ - upstream: "app.fpm.internal", - config: &config.Config{ - FastCGI: config.FastCGI{ - Root: "/site", - SplitPath: ".php", - ReadTimeout: 3 * time.Second, - WriteTimeout: 3 * time.Second, - }, - }, - dialFunc: getBackendDialer(backend), - } - - resp := httptest.NewRecorder() - proxy.ServeHTTP(resp, req) - - if resp.Code != http.StatusOK { - t.Errorf("expected response code '200', got '%d'", resp.Code) - } - if resp.Body.String() != "successful response" { - t.Errorf("expected response body 'successful response', got '%s'", resp.Body.String()) - } -} - -func TestBuildEnv(t *testing.T) { - proxy := Proxy{ - upstream: "app.fpm.internal", - config: &config.Config{ - FastCGI: config.FastCGI{ - Root: "/site", - SplitPath: 
".php", - }, - }, - dialFunc: nil, - } - - req, err := http.NewRequest("post", "https://app.host:443/test/url?key=value", strings.NewReader("test request body")) - if err != nil { - t.Error("failed to create new http request", err) - } - - req.Header.Add("Content-Length", "17") - req.Header.Add("Content-Type", "text/plain") - req.Header.Add("X-Custom-Header-One", "One") - req.TLS = &tls.ConnectionState{} - - env, err := proxy.buildEnv(req, "/docs/index.php/user/profile") - if err != nil { - t.Error("failed to build environment", err) - } - - expected := map[string]string{ - "AUTH_TYPE": "", - "CONTENT_LENGTH": "17", - "CONTENT_TYPE": "text/plain", - "GATEWAY_INTERFACE": "CGI/1.1", - "PATH_INFO": "/user/profile", - "QUERY_STRING": "key=value", - "REMOTE_ADDR": "", - "REMOTE_HOST": "", - "REMOTE_PORT": "", - "REMOTE_IDENT": "", - "REMOTE_USER": "", - "REQUEST_METHOD": "post", - "SERVER_NAME": "app.host", - "SERVER_PORT": "443", - "SERVER_PROTOCOL": "HTTP/1.1", - "SERVER_SOFTWARE": "fabio", - "DOCUMENT_ROOT": "/site", - "DOCUMENT_URI": "/docs/index.php", - "HTTP_HOST": "app.host:443", - "REQUEST_URI": "/test/url?key=value", - "SCRIPT_FILENAME": "/site/docs/index.php/user/profile", - "SCRIPT_NAME": "/docs/index.php", - "PATH_TRANSLATED": "/site/user/profile", - "HTTPS": "on", - "HTTP_X_CUSTOM_HEADER_ONE": "One", - } - - for ke, ve := range expected { - if v, ok := env[ke]; !ok { - t.Errorf("key '%s' is not present in environment map", ke) - } else if v != ve { - t.Errorf("Key '%s': expected value '%s', got '%s'", ke, ve, v) - } - } -} diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index 00e06e824..5a6d083fa 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -35,9 +35,7 @@ func TestProxyProducesCorrectXForwardedSomethingHeader(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{ - Proxy: config.Proxy{LocalIP: "1.1.1.1", ClientIPHeader: "X-Forwarded-For"}, - }, + Config: config.Proxy{LocalIP: "1.1.1.1", ClientIPHeader: "X-Forwarded-For"}, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { return &route.Target{URL: mustParse(server.URL)} @@ -66,7 +64,7 @@ func TestProxyRequestIDHeader(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{Proxy: config.Proxy{RequestID: "X-Request-Id"}}, + Config: config.Proxy{RequestID: "X-Request-Id"}, Transport: http.DefaultTransport, UUID: func() string { return "f47ac10b-58cc-0372-8567-0e02b2c3d479" }, Lookup: func(r *http.Request) *route.Target { @@ -88,13 +86,11 @@ func TestProxySTSHeader(t *testing.T) { defer server.Close() proxy := httptest.NewTLSServer(&HTTPProxy{ - Config: &config.Config{ - Proxy: config.Proxy{ - STSHeader: config.STSHeader{ - MaxAge: 31536000, - Subdomains: true, - Preload: true, - }, + Config: config.Proxy{ + STSHeader: config.STSHeader{ + MaxAge: 31536000, + Subdomains: true, + Preload: true, }, }, Transport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, @@ -153,7 +149,6 @@ func TestProxyNoRouteHTML(t *testing.T) { want := "503" noroute.SetHTML(want) proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, Transport: http.DefaultTransport, Lookup: func(*http.Request) *route.Target { return nil }, }) @@ -167,9 +162,7 @@ func TestProxyNoRouteHTML(t *testing.T) { func TestProxyNoRouteStatus(t *testing.T) { proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{ - Proxy: config.Proxy{NoRouteStatus: 999}, - }, + Config: 
config.Proxy{NoRouteStatus: 999}, Transport: http.DefaultTransport, Lookup: func(*http.Request) *route.Target { return nil }, }) @@ -192,7 +185,6 @@ func TestProxyStripsPath(t *testing.T) { })) proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock /foo/bar " + server.URL + ` opts "strip=/foo"`) @@ -225,7 +217,6 @@ func TestProxyHost(t *testing.T) { tbl, _ := route.NewTable(routes) proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, Transport: &http.Transport{ Dial: func(network, addr string) (net.Conn, error) { addr = server.URL[len("http://"):] @@ -278,7 +269,6 @@ func TestRedirect(t *testing.T) { tbl, _ := route.NewTable(routes) proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) @@ -357,7 +347,7 @@ func testProxyLogOutput(t *testing.T, bodySize int, cfg config.Proxy) { // create a proxy handler with mocked time tm := time.Date(2016, 1, 1, 0, 0, 0, 12345678, time.UTC) proxyHandler := &HTTPProxy{ - Config: &config.Config{}, + Config: cfg, Time: func() time.Time { defer func() { tm = tm.Add(1111111111 * time.Nanosecond) }() return tm @@ -445,7 +435,7 @@ func TestProxyHTTPSUpstream(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, + Config: config.Proxy{}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https"`) @@ -470,7 +460,7 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, + Config: config.Proxy{}, Transport: http.DefaultTransport, InsecureTransport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, @@ -550,10 +540,8 @@ func TestProxyGzipHandler(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{ - Proxy: config.Proxy{ - GZIPContentTypes: regexp.MustCompile("^text/plain(;.*)?$"), - }, + Config: config.Proxy{ + GZIPContentTypes: regexp.MustCompile("^text/plain(;.*)?$"), }, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { @@ -673,11 +661,9 @@ func BenchmarkProxyLogger(b *testing.B) { } proxy := &HTTPProxy{ - Config: &config.Config{ - Proxy: config.Proxy{ - LocalIP: "1.1.1.1", - ClientIPHeader: "X-Forwarded-For", - }, + Config: config.Proxy{ + LocalIP: "1.1.1.1", + ClientIPHeader: "X-Forwarded-For", }, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { diff --git a/proxy/http_proxy.go b/proxy/http_proxy.go index e0eb3010c..2991cad56 100644 --- a/proxy/http_proxy.go +++ b/proxy/http_proxy.go @@ -16,7 +16,6 @@ import ( "github.com/fabiolb/fabio/logger" "github.com/fabiolb/fabio/metrics" "github.com/fabiolb/fabio/noroute" - "github.com/fabiolb/fabio/proxy/fastcgi" "github.com/fabiolb/fabio/proxy/gzip" "github.com/fabiolb/fabio/route" "github.com/fabiolb/fabio/uuid" @@ -25,7 +24,7 @@ import ( // HTTPProxy is a dynamic reverse proxy for HTTP and HTTPS protocols. type HTTPProxy struct { // Config is the proxy configuration as provided during startup. - Config *config.Config + Config config.Proxy // Time returns the current time as the number of seconds since the epoch. 
// If Time is nil, time.Now is used. @@ -64,17 +63,17 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { panic("no lookup function") } - if p.Config.Proxy.RequestID != "" { + if p.Config.RequestID != "" { id := p.UUID if id == nil { id = uuid.NewUUID } - r.Header.Set(p.Config.Proxy.RequestID, id()) + r.Header.Set(p.Config.RequestID, id()) } t := p.Lookup(r) if t == nil { - status := p.Config.Proxy.NoRouteStatus + status := p.Config.NoRouteStatus if status < 100 || status > 999 { status = http.StatusNotFound } @@ -135,12 +134,12 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { targetURL.Path = targetURL.Path[len(t.StripPath):] } - if err := addHeaders(r, p.Config.Proxy, t.StripPath); err != nil { + if err := addHeaders(r, p.Config, t.StripPath); err != nil { http.Error(w, "cannot parse "+r.RemoteAddr, http.StatusInternalServerError) return } - if err := addResponseHeaders(w, r, p.Config.Proxy); err != nil { + if err := addResponseHeaders(w, r, p.Config); err != nil { http.Error(w, "cannot add response headers", http.StatusInternalServerError) return } @@ -152,27 +151,8 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { tr = p.InsecureTransport } - isFCGI := false - if v, ok := t.Opts["proto"]; ok && v == "fcgi" { - isFCGI = true - } - var h http.Handler switch { - case isFCGI: - fcgiProxy := fastcgi.NewProxy(p.Config, targetURL.Host) - if fcgiRoot, ok := t.Opts["root"]; ok { - fcgiProxy.SetRoot(fcgiRoot) - } - if stripPrefix, ok := t.Opts["strip"]; ok { - fcgiProxy.SetStripPathPrefix(stripPrefix) - } - if indexFile, ok := t.Opts["index"]; ok { - fcgiProxy.SetIndex(indexFile) - } - - h = fcgiProxy - case upgrade == "websocket" || upgrade == "Websocket": r.URL = targetURL if targetURL.Scheme == "https" || targetURL.Scheme == "wss" { @@ -186,14 +166,14 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { case accept == "text/event-stream": // use the flush interval for SSE (server-sent events) // must be > 0s to be effective - h = newHTTPProxy(targetURL, tr, p.Config.Proxy.FlushInterval) + h = newHTTPProxy(targetURL, tr, p.Config.FlushInterval) default: h = newHTTPProxy(targetURL, tr, time.Duration(0)) } - if p.Config.Proxy.GZIPContentTypes != nil { - h = gzip.NewGzipHandler(h, p.Config.Proxy.GZIPContentTypes) + if p.Config.GZIPContentTypes != nil { + h = gzip.NewGzipHandler(h, p.Config.GZIPContentTypes) } timeNow := p.Time diff --git a/proxy/listen_test.go b/proxy/listen_test.go index a57058f65..607461641 100644 --- a/proxy/listen_test.go +++ b/proxy/listen_test.go @@ -29,7 +29,6 @@ func TestGracefulShutdown(t *testing.T) { go func() { defer wg.Done() h := &HTTPProxy{ - Config: &config.Config{}, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add svc / " + srv.URL) diff --git a/proxy/ws_integration_test.go b/proxy/ws_integration_test.go index 24dcbcdf7..a03977481 100644 --- a/proxy/ws_integration_test.go +++ b/proxy/ws_integration_test.go @@ -39,7 +39,7 @@ func TestProxyWSUpstream(t *testing.T) { routes += "route add ws /foo/strip " + wsServer.URL + ` opts "strip=/foo"` + "\n" httpProxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{Proxy: config.Proxy{NoRouteStatus: 404, GZIPContentTypes: regexp.MustCompile(".*")}}, + Config: config.Proxy{NoRouteStatus: 404, GZIPContentTypes: regexp.MustCompile(".*")}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, InsecureTransport: &http.Transport{TLSClientConfig: 
tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { @@ -51,7 +51,7 @@ func TestProxyWSUpstream(t *testing.T) { t.Log("Started HTTP proxy: ", httpProxy.URL) httpsProxy := httptest.NewUnstartedServer(&HTTPProxy{ - Config: &config.Config{Proxy: config.Proxy{NoRouteStatus: 404}}, + Config: config.Proxy{NoRouteStatus: 404}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, InsecureTransport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { From dbadf8d9a6da9eb4f3bc56880a30a21be42130d9 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 18:13:20 +0200 Subject: [PATCH 07/44] Update go version for testing --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6d2ff11ba..695439625 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ VENDORFMT = $(shell which vendorfmt) # pin versions for CI builds CI_CONSUL_VERSION=1.0.6 CI_VAULT_VERSION=0.9.6 -CI_GO_VERSION=1.10.2 +CI_GO_VERSION=1.10.3 # all is the default target all: test From 49ac5aefbbdc64bf57436dcaddfe1acf0fd11a25 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 18:13:34 +0200 Subject: [PATCH 08/44] Fix test compile error --- proxy/http_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index 5a6d083fa..453d7fdb8 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -123,7 +123,7 @@ func TestProxyChecksHeaderForAccessRules(t *testing.T) { defer server.Close() proxy := httptest.NewServer(&HTTPProxy{ - Config: &config.Config{}, + Config: config.Proxy{}, Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tgt := &route.Target{ From d46a51ce83d5c363024cef4aca99b947d6366c7b Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 18:13:51 +0200 Subject: [PATCH 09/44] Update Dockerfile for testing --- Dockerfile-test | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Dockerfile-test b/Dockerfile-test index c7d56d2fc..b9b04ca46 100644 --- a/Dockerfile-test +++ b/Dockerfile-test @@ -1,14 +1,14 @@ FROM ubuntu -RUN apt-get update && apt-get -y install unzip make git-core -ARG consul_version -ARG vault_version -ARG go_version -COPY consul_${consul_version}_linux_amd64.zip /tmp -COPY vault_${vault_version}_linux_amd64.zip /tmp -COPY go${go_version}.linux-amd64.tar.gz /tmp -RUN unzip /tmp/consul_${consul_version}_linux_amd64.zip -d /usr/local/bin -RUN unzip /tmp/vault_${vault_version}_linux_amd64.zip -d /usr/local/bin -RUN tar -C /usr/local -x -f /tmp/go${go_version}.linux-amd64.tar.gz +RUN apt-get update && apt-get -y install make git-core unzip +ARG consul_version=1.0.6 +ARG vault_version=0.9.6 +ARG go_version=1.10.3 +ADD https://releases.hashicorp.com/consul/${consul_version}/consul_${consul_version}_linux_amd64.zip /usr/local/bin +ADD https://releases.hashicorp.com/vault/${vault_version}/vault_${vault_version}_linux_amd64.zip /usr/local/bin +ADD https://dl.google.com/go/go${go_version}.linux-amd64.tar.gz /usr/local +RUN cd /usr/local/bin && unzip consul_${consul_version}_linux_amd64.zip +RUN cd /usr/local/bin && unzip vault_${vault_version}_linux_amd64.zip +RUN cd /usr/local && tar xf go${go_version}.linux-amd64.tar.gz ENV PATH=/usr/local/go/bin:/root/go/bin:$PATH WORKDIR /root/go/src/github.com/fabiolb/fabio COPY . . 
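For illustration, here is a minimal sketch of how a test now wires up an `HTTPProxy` after the preceding patches narrowed its `Config` field from `*config.Config` to `config.Proxy` and dropped the FastCGI branch. The helper name and backend URL are hypothetical; the `route.NewTable` route string and the `tbl.Lookup` call mirror the ones already used in the integration tests above.

```go
package proxy

import (
	"net/http"
	"net/http/httptest"

	"github.com/fabiolb/fabio/config"
	"github.com/fabiolb/fabio/route"
)

// newTestProxy (hypothetical helper) starts an httptest server around an
// HTTPProxy. Proxy settings are passed as a plain config.Proxy value; the
// full *config.Config wrapper is no longer needed.
func newTestProxy(backendURL string) *httptest.Server {
	return httptest.NewServer(&HTTPProxy{
		Config:    config.Proxy{NoRouteStatus: 404},
		Transport: http.DefaultTransport,
		Lookup: func(r *http.Request) *route.Target {
			// single-route table forwarding everything to backendURL
			tbl, _ := route.NewTable("route add svc / " + backendURL)
			return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"])
		},
	})
}
```

Requests against the returned server then exercise the same lookup path the tests in this series rely on.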
From 270d7d2b8b85b72402c0d4459f4f97bc14d122de Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 21:55:01 +0200 Subject: [PATCH 10/44] switch to go mod --- go.mod | 30 + vendor/github.com/armon/go-proxyproto/LICENSE | 21 - .../github.com/armon/go-proxyproto/README.md | 36 - .../armon/go-proxyproto/protocol.go | 194 -- .../circonus-labs/circonus-gometrics/LICENSE | 28 - .../circonus-gometrics/README.md | 181 -- .../circonus-gometrics/api/api.go | 214 -- .../circonus-gometrics/api/broker.go | 89 - .../circonus-gometrics/api/check.go | 113 - .../circonus-gometrics/api/checkbundle.go | 132 - .../circonus-gometrics/checkmgr/broker.go | 199 -- .../circonus-gometrics/checkmgr/cert.go | 87 - .../circonus-gometrics/checkmgr/check.go | 211 -- .../circonus-gometrics/checkmgr/checkmgr.go | 365 --- .../circonus-gometrics/checkmgr/metrics.go | 79 - .../circonus-gometrics/circonus-gometrics.go | 254 -- .../circonus-gometrics/counter.go | 55 - .../circonus-labs/circonus-gometrics/gauge.go | 81 - .../circonus-gometrics/histogram.go | 77 - .../circonus-gometrics/submit.go | 126 - .../circonus-labs/circonus-gometrics/text.go | 41 - .../circonus-labs/circonus-gometrics/tools.go | 23 - .../circonus-labs/circonus-gometrics/util.go | 100 - .../circonus-labs/circonusllhist/LICENSE | 28 - .../circonusllhist/circonusllhist.go | 555 ---- .../cyberdelia/go-metrics-graphite/AUTHORS | 8 - .../cyberdelia/go-metrics-graphite/LICENSE | 22 - .../cyberdelia/go-metrics-graphite/README.md | 19 - .../go-metrics-graphite/graphite.go | 113 - vendor/github.com/fatih/structs/LICENSE | 21 - vendor/github.com/fatih/structs/README.md | 163 - vendor/github.com/fatih/structs/field.go | 141 - vendor/github.com/fatih/structs/structs.go | 507 --- vendor/github.com/fatih/structs/tags.go | 32 - vendor/github.com/gobwas/glob/LICENSE | 21 - vendor/github.com/gobwas/glob/bench.sh | 26 - .../gobwas/glob/compiler/compiler.go | 525 ---- vendor/github.com/gobwas/glob/glob.go | 80 - vendor/github.com/gobwas/glob/match/any.go | 45 - vendor/github.com/gobwas/glob/match/any_of.go | 82 - vendor/github.com/gobwas/glob/match/btree.go | 185 -- .../github.com/gobwas/glob/match/contains.go | 58 - .../github.com/gobwas/glob/match/every_of.go | 99 - vendor/github.com/gobwas/glob/match/list.go | 49 - vendor/github.com/gobwas/glob/match/match.go | 81 - vendor/github.com/gobwas/glob/match/max.go | 49 - vendor/github.com/gobwas/glob/match/min.go | 57 - .../github.com/gobwas/glob/match/nothing.go | 27 - vendor/github.com/gobwas/glob/match/prefix.go | 50 - .../gobwas/glob/match/prefix_any.go | 55 - .../gobwas/glob/match/prefix_suffix.go | 62 - vendor/github.com/gobwas/glob/match/range.go | 48 - vendor/github.com/gobwas/glob/match/row.go | 77 - .../github.com/gobwas/glob/match/segments.go | 91 - vendor/github.com/gobwas/glob/match/single.go | 43 - vendor/github.com/gobwas/glob/match/suffix.go | 35 - .../gobwas/glob/match/suffix_any.go | 43 - vendor/github.com/gobwas/glob/match/super.go | 33 - vendor/github.com/gobwas/glob/match/text.go | 45 - vendor/github.com/gobwas/glob/readme.md | 148 - .../github.com/gobwas/glob/syntax/ast/ast.go | 122 - .../gobwas/glob/syntax/ast/parser.go | 157 - .../gobwas/glob/syntax/lexer/lexer.go | 273 -- .../gobwas/glob/syntax/lexer/token.go | 88 - .../github.com/gobwas/glob/syntax/syntax.go | 14 - .../gobwas/glob/util/runes/runes.go | 154 - .../gobwas/glob/util/strings/strings.go | 39 - vendor/github.com/hashicorp/consul/LICENSE | 354 --- .../github.com/hashicorp/consul/api/README.md | 43 - 
vendor/github.com/hashicorp/consul/api/acl.go | 140 - .../github.com/hashicorp/consul/api/agent.go | 471 --- vendor/github.com/hashicorp/consul/api/api.go | 649 ---- .../hashicorp/consul/api/catalog.go | 194 -- .../hashicorp/consul/api/coordinate.go | 67 - .../github.com/hashicorp/consul/api/event.go | 104 - .../github.com/hashicorp/consul/api/health.go | 199 -- vendor/github.com/hashicorp/consul/api/kv.go | 419 --- .../github.com/hashicorp/consul/api/lock.go | 384 --- .../hashicorp/consul/api/operator.go | 11 - .../hashicorp/consul/api/operator_area.go | 168 - .../consul/api/operator_autopilot.go | 215 -- .../hashicorp/consul/api/operator_keyring.go | 83 - .../hashicorp/consul/api/operator_raft.go | 86 - .../hashicorp/consul/api/prepared_query.go | 198 -- vendor/github.com/hashicorp/consul/api/raw.go | 24 - .../hashicorp/consul/api/semaphore.go | 512 --- .../hashicorp/consul/api/session.go | 217 -- .../hashicorp/consul/api/snapshot.go | 47 - .../github.com/hashicorp/consul/api/status.go | 43 - vendor/github.com/hashicorp/errwrap/LICENSE | 354 --- vendor/github.com/hashicorp/errwrap/README.md | 89 - .../github.com/hashicorp/errwrap/errwrap.go | 169 - .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 --- .../hashicorp/go-cleanhttp/README.md | 30 - .../hashicorp/go-cleanhttp/cleanhttp.go | 53 - .../github.com/hashicorp/go-cleanhttp/doc.go | 20 - .../hashicorp/go-multierror/LICENSE | 353 --- .../hashicorp/go-multierror/README.md | 91 - .../hashicorp/go-multierror/append.go | 37 - .../hashicorp/go-multierror/flatten.go | 26 - .../hashicorp/go-multierror/format.go | 23 - .../hashicorp/go-multierror/multierror.go | 51 - .../hashicorp/go-multierror/prefix.go | 37 - .../hashicorp/go-retryablehttp/LICENSE | 363 --- .../hashicorp/go-retryablehttp/Makefile | 11 - .../hashicorp/go-retryablehttp/README.md | 43 - .../hashicorp/go-retryablehttp/client.go | 302 -- .../github.com/hashicorp/go-rootcerts/LICENSE | 363 --- .../hashicorp/go-rootcerts/Makefile | 8 - .../hashicorp/go-rootcerts/README.md | 43 - .../github.com/hashicorp/go-rootcerts/doc.go | 9 - .../hashicorp/go-rootcerts/rootcerts.go | 103 - .../hashicorp/go-rootcerts/rootcerts_base.go | 12 - .../go-rootcerts/rootcerts_darwin.go | 48 - vendor/github.com/hashicorp/hcl/LICENSE | 354 --- vendor/github.com/hashicorp/hcl/Makefile | 17 - vendor/github.com/hashicorp/hcl/README.md | 104 - vendor/github.com/hashicorp/hcl/appveyor.yml | 16 - vendor/github.com/hashicorp/hcl/decoder.go | 654 ---- vendor/github.com/hashicorp/hcl/hcl.go | 11 - .../github.com/hashicorp/hcl/hcl/ast/ast.go | 211 -- .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 - .../hashicorp/hcl/hcl/parser/error.go | 17 - .../hashicorp/hcl/hcl/parser/parser.go | 454 --- .../hashicorp/hcl/hcl/scanner/scanner.go | 629 ---- .../hashicorp/hcl/hcl/strconv/quote.go | 248 -- .../hashicorp/hcl/hcl/token/position.go | 46 - .../hashicorp/hcl/hcl/token/token.go | 214 -- .../hashicorp/hcl/json/parser/flatten.go | 111 - .../hashicorp/hcl/json/parser/parser.go | 297 -- .../hashicorp/hcl/json/scanner/scanner.go | 451 --- .../hashicorp/hcl/json/token/position.go | 46 - .../hashicorp/hcl/json/token/token.go | 118 - vendor/github.com/hashicorp/hcl/lex.go | 38 - vendor/github.com/hashicorp/hcl/parse.go | 39 - vendor/github.com/hashicorp/serf/LICENSE | 354 --- .../hashicorp/serf/coordinate/README.md | 1 - .../hashicorp/serf/coordinate/client.go | 180 -- .../hashicorp/serf/coordinate/config.go | 70 - .../hashicorp/serf/coordinate/coordinate.go | 183 -- .../hashicorp/serf/coordinate/phantom.go | 187 -- 
.../hashicorp/serf/coordinate/test_util.go | 27 - vendor/github.com/hashicorp/vault/LICENSE | 363 --- vendor/github.com/hashicorp/vault/api/SPEC.md | 611 ---- vendor/github.com/hashicorp/vault/api/auth.go | 11 - .../hashicorp/vault/api/auth_token.go | 178 -- .../github.com/hashicorp/vault/api/client.go | 333 -- vendor/github.com/hashicorp/vault/api/help.go | 25 - .../github.com/hashicorp/vault/api/logical.go | 123 - .../github.com/hashicorp/vault/api/request.go | 71 - .../hashicorp/vault/api/response.go | 75 - .../github.com/hashicorp/vault/api/secret.go | 66 - vendor/github.com/hashicorp/vault/api/ssh.go | 38 - .../hashicorp/vault/api/ssh_agent.go | 239 -- vendor/github.com/hashicorp/vault/api/sys.go | 11 - .../hashicorp/vault/api/sys_audit.go | 85 - .../hashicorp/vault/api/sys_auth.go | 56 - .../hashicorp/vault/api/sys_capabilities.go | 60 - .../hashicorp/vault/api/sys_generate_root.go | 77 - .../hashicorp/vault/api/sys_init.go | 51 - .../hashicorp/vault/api/sys_leader.go | 20 - .../hashicorp/vault/api/sys_lease.go | 45 - .../hashicorp/vault/api/sys_mounts.go | 113 - .../hashicorp/vault/api/sys_policy.go | 72 - .../hashicorp/vault/api/sys_rekey.go | 200 -- .../hashicorp/vault/api/sys_rotate.go | 30 - .../hashicorp/vault/api/sys_seal.go | 56 - .../hashicorp/vault/api/sys_stepdown.go | 10 - .../magiconair/properties/CHANGELOG.md | 104 - .../github.com/magiconair/properties/LICENSE | 25 - .../magiconair/properties/README.md | 100 - .../magiconair/properties/decode.go | 289 -- .../github.com/magiconair/properties/doc.go | 156 - .../magiconair/properties/integrate.go | 34 - .../github.com/magiconair/properties/lex.go | 407 --- .../github.com/magiconair/properties/load.go | 241 -- .../magiconair/properties/parser.go | 95 - .../magiconair/properties/properties.go | 811 ----- .../magiconair/properties/rangecheck.go | 31 - .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - .../mitchellh/go-homedir/homedir.go | 137 - .../github.com/mitchellh/mapstructure/LICENSE | 21 - .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 151 - .../mitchellh/mapstructure/error.go | 50 - .../mitchellh/mapstructure/mapstructure.go | 767 ----- .../pascaldekloe/goe/verify/values.go | 168 - .../pascaldekloe/goe/verify/verify.go | 73 - vendor/github.com/pkg/profile/AUTHORS | 1 - vendor/github.com/pkg/profile/LICENSE | 24 - vendor/github.com/pkg/profile/README.md | 54 - vendor/github.com/pkg/profile/mutex.go | 13 - vendor/github.com/pkg/profile/mutex17.go | 9 - vendor/github.com/pkg/profile/profile.go | 244 -- vendor/github.com/pkg/profile/trace.go | 8 - vendor/github.com/pkg/profile/trace16.go | 10 - .../pubnub/go-metrics-statsd/LICENSE | 21 - .../pubnub/go-metrics-statsd/README.md | 18 - .../pubnub/go-metrics-statsd/statsd.go | 117 - vendor/github.com/rcrowley/go-metrics/LICENSE | 29 - .../github.com/rcrowley/go-metrics/README.md | 168 - .../github.com/rcrowley/go-metrics/counter.go | 112 - .../github.com/rcrowley/go-metrics/debug.go | 76 - vendor/github.com/rcrowley/go-metrics/ewma.go | 138 - .../github.com/rcrowley/go-metrics/gauge.go | 120 - .../rcrowley/go-metrics/gauge_float64.go | 125 - .../rcrowley/go-metrics/graphite.go | 113 - .../rcrowley/go-metrics/healthcheck.go | 61 - .../rcrowley/go-metrics/histogram.go | 202 -- vendor/github.com/rcrowley/go-metrics/json.go | 31 - vendor/github.com/rcrowley/go-metrics/log.go | 80 - .../github.com/rcrowley/go-metrics/memory.md | 285 -- .../github.com/rcrowley/go-metrics/meter.go | 257 
-- .../github.com/rcrowley/go-metrics/metrics.go | 13 - .../rcrowley/go-metrics/opentsdb.go | 119 - .../rcrowley/go-metrics/registry.go | 363 --- .../github.com/rcrowley/go-metrics/runtime.go | 212 -- .../rcrowley/go-metrics/runtime_cgo.go | 10 - .../go-metrics/runtime_gccpufraction.go | 9 - .../rcrowley/go-metrics/runtime_no_cgo.go | 7 - .../go-metrics/runtime_no_gccpufraction.go | 9 - .../github.com/rcrowley/go-metrics/sample.go | 616 ---- .../github.com/rcrowley/go-metrics/syslog.go | 78 - .../github.com/rcrowley/go-metrics/timer.go | 329 -- .../rcrowley/go-metrics/validate.sh | 10 - .../github.com/rcrowley/go-metrics/writer.go | 100 - vendor/github.com/rogpeppe/fastuuid/LICENSE | 26 - vendor/github.com/rogpeppe/fastuuid/README.md | 50 - vendor/github.com/rogpeppe/fastuuid/uuid.go | 66 - vendor/github.com/sergi/go-diff/LICENSE | 20 - .../sergi/go-diff/diffmatchpatch/diff.go | 1339 -------- .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 - .../sergi/go-diff/diffmatchpatch/match.go | 160 - .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 - .../sergi/go-diff/diffmatchpatch/patch.go | 556 ---- .../go-diff/diffmatchpatch/stringutil.go | 88 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - .../x/net/http2/client_conn_pool.go | 256 -- .../x/net/http2/configure_transport.go | 80 - vendor/golang.org/x/net/http2/errors.go | 130 - vendor/golang.org/x/net/http2/fixed_buffer.go | 60 - vendor/golang.org/x/net/http2/flow.go | 50 - vendor/golang.org/x/net/http2/frame.go | 1544 --------- vendor/golang.org/x/net/http2/go16.go | 43 - vendor/golang.org/x/net/http2/go17.go | 106 - vendor/golang.org/x/net/http2/go17_not18.go | 36 - vendor/golang.org/x/net/http2/go18.go | 50 - vendor/golang.org/x/net/http2/gotrack.go | 170 - vendor/golang.org/x/net/http2/headermap.go | 78 - vendor/golang.org/x/net/http2/hpack/encode.go | 251 -- vendor/golang.org/x/net/http2/hpack/hpack.go | 542 ---- .../golang.org/x/net/http2/hpack/huffman.go | 212 -- vendor/golang.org/x/net/http2/hpack/tables.go | 352 --- vendor/golang.org/x/net/http2/http2.go | 387 --- vendor/golang.org/x/net/http2/not_go16.go | 46 - vendor/golang.org/x/net/http2/not_go17.go | 87 - vendor/golang.org/x/net/http2/not_go18.go | 27 - vendor/golang.org/x/net/http2/pipe.go | 153 - vendor/golang.org/x/net/http2/server.go | 2753 ----------------- vendor/golang.org/x/net/http2/transport.go | 2129 ------------- vendor/golang.org/x/net/http2/write.go | 370 --- vendor/golang.org/x/net/http2/writesched.go | 242 -- .../x/net/http2/writesched_priority.go | 452 --- .../x/net/http2/writesched_random.go | 72 - vendor/golang.org/x/net/idna/idna.go | 68 - vendor/golang.org/x/net/idna/punycode.go | 200 -- .../golang.org/x/net/lex/httplex/httplex.go | 351 --- vendor/golang.org/x/net/websocket/client.go | 113 - vendor/golang.org/x/net/websocket/hybi.go | 586 ---- vendor/golang.org/x/net/websocket/server.go | 113 - .../golang.org/x/net/websocket/websocket.go | 412 --- vendor/golang.org/x/sync/LICENSE | 27 - vendor/golang.org/x/sync/PATENTS | 22 - .../x/sync/singleflight/singleflight.go | 111 - vendor/vendor.json | 54 - 280 files changed, 30 insertions(+), 46217 deletions(-) create mode 100644 go.mod delete mode 100644 vendor/github.com/armon/go-proxyproto/LICENSE delete mode 100644 vendor/github.com/armon/go-proxyproto/README.md delete mode 100644 vendor/github.com/armon/go-proxyproto/protocol.go delete mode 
100644 vendor/github.com/circonus-labs/circonus-gometrics/LICENSE delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/README.md delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/api.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/check.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/counter.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/gauge.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/histogram.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/submit.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/text.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/tools.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/util.go delete mode 100644 vendor/github.com/circonus-labs/circonusllhist/LICENSE delete mode 100644 vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go delete mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS delete mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE delete mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/README.md delete mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go delete mode 100644 vendor/github.com/fatih/structs/LICENSE delete mode 100644 vendor/github.com/fatih/structs/README.md delete mode 100644 vendor/github.com/fatih/structs/field.go delete mode 100644 vendor/github.com/fatih/structs/structs.go delete mode 100644 vendor/github.com/fatih/structs/tags.go delete mode 100644 vendor/github.com/gobwas/glob/LICENSE delete mode 100755 vendor/github.com/gobwas/glob/bench.sh delete mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go delete mode 100644 vendor/github.com/gobwas/glob/glob.go delete mode 100644 vendor/github.com/gobwas/glob/match/any.go delete mode 100644 vendor/github.com/gobwas/glob/match/any_of.go delete mode 100644 vendor/github.com/gobwas/glob/match/btree.go delete mode 100644 vendor/github.com/gobwas/glob/match/contains.go delete mode 100644 vendor/github.com/gobwas/glob/match/every_of.go delete mode 100644 vendor/github.com/gobwas/glob/match/list.go delete mode 100644 vendor/github.com/gobwas/glob/match/match.go delete mode 100644 vendor/github.com/gobwas/glob/match/max.go delete mode 100644 vendor/github.com/gobwas/glob/match/min.go delete mode 100644 vendor/github.com/gobwas/glob/match/nothing.go delete mode 100644 vendor/github.com/gobwas/glob/match/prefix.go delete mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go delete mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go delete mode 100644 vendor/github.com/gobwas/glob/match/range.go delete mode 100644 
vendor/github.com/gobwas/glob/match/row.go delete mode 100644 vendor/github.com/gobwas/glob/match/segments.go delete mode 100644 vendor/github.com/gobwas/glob/match/single.go delete mode 100644 vendor/github.com/gobwas/glob/match/suffix.go delete mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go delete mode 100644 vendor/github.com/gobwas/glob/match/super.go delete mode 100644 vendor/github.com/gobwas/glob/match/text.go delete mode 100644 vendor/github.com/gobwas/glob/readme.md delete mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go delete mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go delete mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go delete mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go delete mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go delete mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go delete mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go delete mode 100644 vendor/github.com/hashicorp/consul/LICENSE delete mode 100644 vendor/github.com/hashicorp/consul/api/README.md delete mode 100644 vendor/github.com/hashicorp/consul/api/acl.go delete mode 100644 vendor/github.com/hashicorp/consul/api/agent.go delete mode 100644 vendor/github.com/hashicorp/consul/api/api.go delete mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go delete mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go delete mode 100644 vendor/github.com/hashicorp/consul/api/event.go delete mode 100644 vendor/github.com/hashicorp/consul/api/health.go delete mode 100644 vendor/github.com/hashicorp/consul/api/kv.go delete mode 100644 vendor/github.com/hashicorp/consul/api/lock.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go delete mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go delete mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go delete mode 100644 vendor/github.com/hashicorp/consul/api/raw.go delete mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go delete mode 100644 vendor/github.com/hashicorp/consul/api/session.go delete mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go delete mode 100644 vendor/github.com/hashicorp/consul/api/status.go delete mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE delete mode 100644 vendor/github.com/hashicorp/errwrap/README.md delete mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go delete mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-multierror/README.md delete mode 100644 vendor/github.com/hashicorp/go-multierror/append.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/format.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go delete mode 100644 
vendor/github.com/hashicorp/go-retryablehttp/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go delete mode 100644 vendor/github.com/hashicorp/hcl/LICENSE delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile delete mode 100644 vendor/github.com/hashicorp/hcl/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go delete mode 100644 vendor/github.com/hashicorp/serf/LICENSE delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/README.md delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go delete mode 100644 vendor/github.com/hashicorp/serf/coordinate/test_util.go delete mode 100644 vendor/github.com/hashicorp/vault/LICENSE delete mode 100644 vendor/github.com/hashicorp/vault/api/SPEC.md delete mode 100644 vendor/github.com/hashicorp/vault/api/auth.go delete mode 100644 vendor/github.com/hashicorp/vault/api/auth_token.go delete mode 100644 vendor/github.com/hashicorp/vault/api/client.go delete mode 100644 vendor/github.com/hashicorp/vault/api/help.go delete mode 100644 vendor/github.com/hashicorp/vault/api/logical.go delete mode 100644 vendor/github.com/hashicorp/vault/api/request.go delete mode 100644 vendor/github.com/hashicorp/vault/api/response.go delete mode 100644 vendor/github.com/hashicorp/vault/api/secret.go delete mode 100644 vendor/github.com/hashicorp/vault/api/ssh.go delete mode 100644 vendor/github.com/hashicorp/vault/api/ssh_agent.go delete mode 100644 
vendor/github.com/hashicorp/vault/api/sys.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_audit.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_auth.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_capabilities.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_generate_root.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_init.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_leader.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_lease.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_mounts.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_policy.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_rekey.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_rotate.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_seal.go delete mode 100644 vendor/github.com/hashicorp/vault/api/sys_stepdown.go delete mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md delete mode 100644 vendor/github.com/magiconair/properties/LICENSE delete mode 100644 vendor/github.com/magiconair/properties/README.md delete mode 100644 vendor/github.com/magiconair/properties/decode.go delete mode 100644 vendor/github.com/magiconair/properties/doc.go delete mode 100644 vendor/github.com/magiconair/properties/integrate.go delete mode 100644 vendor/github.com/magiconair/properties/lex.go delete mode 100644 vendor/github.com/magiconair/properties/load.go delete mode 100644 vendor/github.com/magiconair/properties/parser.go delete mode 100644 vendor/github.com/magiconair/properties/properties.go delete mode 100644 vendor/github.com/magiconair/properties/rangecheck.go delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go delete mode 100644 vendor/github.com/pascaldekloe/goe/verify/values.go delete mode 100644 vendor/github.com/pascaldekloe/goe/verify/verify.go delete mode 100644 vendor/github.com/pkg/profile/AUTHORS delete mode 100644 vendor/github.com/pkg/profile/LICENSE delete mode 100644 vendor/github.com/pkg/profile/README.md delete mode 100644 vendor/github.com/pkg/profile/mutex.go delete mode 100644 vendor/github.com/pkg/profile/mutex17.go delete mode 100644 vendor/github.com/pkg/profile/profile.go delete mode 100644 vendor/github.com/pkg/profile/trace.go delete mode 100644 vendor/github.com/pkg/profile/trace16.go delete mode 100644 vendor/github.com/pubnub/go-metrics-statsd/LICENSE delete mode 100644 vendor/github.com/pubnub/go-metrics-statsd/README.md delete mode 100644 vendor/github.com/pubnub/go-metrics-statsd/statsd.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE delete mode 100644 vendor/github.com/rcrowley/go-metrics/README.md delete mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go delete mode 100644 
vendor/github.com/rcrowley/go-metrics/gauge_float64.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/json.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/log.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md delete mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go delete mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go delete mode 100755 vendor/github.com/rcrowley/go-metrics/validate.sh delete mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go delete mode 100644 vendor/github.com/rogpeppe/fastuuid/LICENSE delete mode 100644 vendor/github.com/rogpeppe/fastuuid/README.md delete mode 100644 vendor/github.com/rogpeppe/fastuuid/uuid.go delete mode 100644 vendor/github.com/sergi/go-diff/LICENSE delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 vendor/golang.org/x/net/http2/configure_transport.go delete mode 100644 vendor/golang.org/x/net/http2/errors.go delete mode 100644 vendor/golang.org/x/net/http2/fixed_buffer.go delete mode 100644 vendor/golang.org/x/net/http2/flow.go delete mode 100644 vendor/golang.org/x/net/http2/frame.go delete mode 100644 vendor/golang.org/x/net/http2/go16.go delete mode 100644 vendor/golang.org/x/net/http2/go17.go delete mode 100644 vendor/golang.org/x/net/http2/go17_not18.go delete mode 100644 vendor/golang.org/x/net/http2/go18.go delete mode 100644 vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 vendor/golang.org/x/net/http2/headermap.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 vendor/golang.org/x/net/http2/http2.go delete mode 100644 
vendor/golang.org/x/net/http2/not_go16.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go18.go
 delete mode 100644 vendor/golang.org/x/net/http2/pipe.go
 delete mode 100644 vendor/golang.org/x/net/http2/server.go
 delete mode 100644 vendor/golang.org/x/net/http2/transport.go
 delete mode 100644 vendor/golang.org/x/net/http2/write.go
 delete mode 100644 vendor/golang.org/x/net/http2/writesched.go
 delete mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go
 delete mode 100644 vendor/golang.org/x/net/http2/writesched_random.go
 delete mode 100644 vendor/golang.org/x/net/idna/idna.go
 delete mode 100644 vendor/golang.org/x/net/idna/punycode.go
 delete mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go
 delete mode 100644 vendor/golang.org/x/net/websocket/client.go
 delete mode 100644 vendor/golang.org/x/net/websocket/hybi.go
 delete mode 100644 vendor/golang.org/x/net/websocket/server.go
 delete mode 100644 vendor/golang.org/x/net/websocket/websocket.go
 delete mode 100644 vendor/golang.org/x/sync/LICENSE
 delete mode 100644 vendor/golang.org/x/sync/PATENTS
 delete mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go
 delete mode 100644 vendor/vendor.json

diff --git a/go.mod b/go.mod
new file mode 100644
index 000000000..4ff408fb7
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,30 @@
+module github.com/fabiolb/fabio
+
+require (
+	github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7
+	github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06
+	github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527
+	github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5
+	github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10
+	github.com/gobwas/glob v0.0.0-20180208211842-19c076cdf202
+	github.com/hashicorp/consul v0.8.0
+	github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce
+	github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0
+	github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19
+	github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a
+	github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90
+	github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f
+	github.com/hashicorp/serf v0.7.0
+	github.com/hashicorp/vault v0.6.0
+	github.com/magiconair/properties v0.0.0-20171031211101-49d762b9817b
+	github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea
+	github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084
+	github.com/pascaldekloe/goe v0.0.0-20150719195608-f99183613f48
+	github.com/pkg/profile v1.2.1
+	github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b
+	github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165
+	github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af
+	github.com/sergi/go-diff v0.0.0-20170118131230-24e2351369ec
+	golang.org/x/net v0.0.0-20170114055629-f2499483f923
+	golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654
+)
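The go.mod above pins every former vendored dependency; entries of the form v0.0.0-<timestamp>-<commit> are Go module pseudo-versions that lock an untagged commit. As a quick sanity check that imports now resolve through the module graph rather than the deleted vendor/ tree, a minimal throwaway program (not part of the patch; the glob pattern and hostnames are invented) can exercise one of the pinned modules, e.g. gobwas/glob:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// gobwas/glob is one of the modules pinned in the new go.mod.
	// With '.' as a separator, '*' will not match across label boundaries.
	g := glob.MustCompile("*.eu.example.com", '.')
	fmt.Println(g.Match("api.eu.example.com")) // true
	fmt.Println(g.Match("api.us.example.com")) // false
}
```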
diff --git a/vendor/github.com/armon/go-proxyproto/LICENSE b/vendor/github.com/armon/go-proxyproto/LICENSE
deleted file mode 100644
index 3ed5f4302..000000000
--- a/vendor/github.com/armon/go-proxyproto/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Armon Dadgar
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-proxyproto/README.md b/vendor/github.com/armon/go-proxyproto/README.md
deleted file mode 100644
index 25a779cca..000000000
--- a/vendor/github.com/armon/go-proxyproto/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# proxyproto
-
-This library provides the `proxyproto` package which can be used for servers
-listening behind HAProxy of Amazon ELB load balancers. Those load balancers
-support the use of a proxy protocol (http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt),
-which provides a simple mechansim for the server to get the address of the client
-instead of the load balancer.
-
-This library provides both a net.Listener and net.Conn implementation that
-can be used to handle situation in which you may be using the proxy protocol.
-Only proxy protocol version 1, the human-readable form, is understood.
-
-The only caveat is that we check for the "PROXY " prefix to determine if the protocol
-is being used. If that string may occur as part of your input, then it is ambiguous
-if the protocol is being used and you may have problems.
-
-# Documentation
-
-Full documentation can be found [here](http://godoc.org/github.com/armon/go-proxyproto).
-
-# Examples
-
-Using the library is very simple:
-
-```
-
-// Create a listener
-list, err := net.Listen("tcp", "...")
-
-// Wrap listener in a proxyproto listener
-proxyList := &proxyproto.Listener{list}
-conn, err :=proxyList.Accept()
-
-...
-```
-
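For reference, a slightly fuller version of the usage the deleted README sketches above — a self-contained illustration only, not part of the patch, with a made-up listen address. The `Listener` field name matches the struct definition in protocol.go below:

```go
package main

import (
	"log"
	"net"

	proxyproto "github.com/armon/go-proxyproto"
)

func main() {
	list, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the listener; connections that begin with "PROXY ..." are
	// parsed, all other connections pass through untouched.
	proxyList := &proxyproto.Listener{Listener: list}

	for {
		conn, err := proxyList.Accept()
		if err != nil {
			log.Fatal(err)
		}
		// RemoteAddr() reports the client address from the header, if sent.
		log.Printf("connection from %s", conn.RemoteAddr())
		conn.Close()
	}
}
```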
diff --git a/vendor/github.com/armon/go-proxyproto/protocol.go b/vendor/github.com/armon/go-proxyproto/protocol.go
deleted file mode 100644
index 2fc1dfc01..000000000
--- a/vendor/github.com/armon/go-proxyproto/protocol.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package proxyproto
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-var (
-	// prefix is the string we look for at the start of a connection
-	// to check if this connection is using the proxy protocol
-	prefix    = []byte("PROXY ")
-	prefixLen = len(prefix)
-)
-
-// Listener is used to wrap an underlying listener,
-// whose connections may be using the HAProxy Proxy Protocol (version 1).
-// If the connection is using the protocol, the RemoteAddr() will return
-// the correct client address.
-type Listener struct {
-	Listener net.Listener
-}
-
-// Conn is used to wrap and underlying connection which
-// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
-// return the address of the client instead of the proxy address.
-type Conn struct {
-	bufReader *bufio.Reader
-	conn      net.Conn
-	dstAddr   *net.TCPAddr
-	srcAddr   *net.TCPAddr
-	once      sync.Once
-}
-
-// Accept waits for and returns the next connection to the listener.
-func (p *Listener) Accept() (net.Conn, error) {
-	// Get the underlying connection
-	conn, err := p.Listener.Accept()
-	if err != nil {
-		return nil, err
-	}
-	return NewConn(conn), nil
-}
-
-// Close closes the underlying listener.
-func (p *Listener) Close() error {
-	return p.Listener.Close()
-}
-
-// Addr returns the underlying listener's network address.
-func (p *Listener) Addr() net.Addr {
-	return p.Listener.Addr()
-}
-
-// NewConn is used to wrap a net.Conn that may be speaking
-// the proxy protocol into a proxyproto.Conn
-func NewConn(conn net.Conn) *Conn {
-	pConn := &Conn{
-		bufReader: bufio.NewReader(conn),
-		conn:      conn,
-	}
-	return pConn
-}
-
-// Read is check for the proxy protocol header when doing
-// the initial scan. If there is an error parsing the header,
-// it is returned and the socket is closed.
-func (p *Conn) Read(b []byte) (int, error) {
-	var err error
-	p.once.Do(func() { err = p.checkPrefix() })
-	if err != nil {
-		return 0, err
-	}
-	return p.bufReader.Read(b)
-}
-
-func (p *Conn) Write(b []byte) (int, error) {
-	return p.conn.Write(b)
-}
-
-func (p *Conn) Close() error {
-	return p.conn.Close()
-}
-
-func (p *Conn) LocalAddr() net.Addr {
-	return p.conn.LocalAddr()
-}
-
-// RemoteAddr returns the address of the client if the proxy
-// protocol is being used, otherwise just returns the address of
-// the socket peer. If there is an error parsing the header, the
-// address of the client is not returned, and the socket is closed.
-// Once implication of this is that the call could block if the
-// client is slow. Using a Deadline is recommended if this is called
-// before Read()
-func (p *Conn) RemoteAddr() net.Addr {
-	p.once.Do(func() {
-		if err := p.checkPrefix(); err != nil && err != io.EOF {
-			log.Printf("[ERR] Failed to read proxy prefix: %v", err)
-		}
-	})
-	if p.srcAddr != nil {
-		return p.srcAddr
-	}
-	return p.conn.RemoteAddr()
-}
-
-func (p *Conn) SetDeadline(t time.Time) error {
-	return p.conn.SetDeadline(t)
-}
-
-func (p *Conn) SetReadDeadline(t time.Time) error {
-	return p.conn.SetReadDeadline(t)
-}
-
-func (p *Conn) SetWriteDeadline(t time.Time) error {
-	return p.conn.SetWriteDeadline(t)
-}
-
-func (p *Conn) checkPrefix() error {
-	// Incrementally check each byte of the prefix
-	for i := 1; i <= prefixLen; i++ {
-		inp, err := p.bufReader.Peek(i)
-		if err != nil {
-			return err
-		}
-
-		// Check for a prefix mis-match, quit early
-		if !bytes.Equal(inp, prefix[:i]) {
-			return nil
-		}
-	}
-
-	// Read the header line
-	header, err := p.bufReader.ReadString('\n')
-	if err != nil {
-		p.conn.Close()
-		return err
-	}
-
-	// Strip the carriage return and new line
-	header = header[:len(header)-2]
-
-	// Split on spaces, should be (PROXY <type> <src addr> <dst addr> <src port> <dst port>)
-	parts := strings.Split(header, " ")
-	if len(parts) != 6 {
-		p.conn.Close()
-		return fmt.Errorf("Invalid header line: %s", header)
-	}
-
-	// Verify the type is known
-	switch parts[1] {
-	case "TCP4":
-	case "TCP6":
-	default:
-		p.conn.Close()
-		return fmt.Errorf("Unhandled address type: %s", parts[1])
-	}
-
-	// Parse out the source address
-	ip := net.ParseIP(parts[2])
-	if ip == nil {
-		p.conn.Close()
-		return fmt.Errorf("Invalid source ip: %s", parts[2])
-	}
-	port, err := strconv.Atoi(parts[4])
-	if err != nil {
-		p.conn.Close()
-		return fmt.Errorf("Invalid source port: %s", parts[4])
-	}
-	p.srcAddr = &net.TCPAddr{IP: ip, Port: port}
-
-	// Parse out the destination address
-	ip = net.ParseIP(parts[3])
-	if ip == nil {
-		p.conn.Close()
-		return fmt.Errorf("Invalid destination ip: %s", parts[3])
-	}
-	port, err = strconv.Atoi(parts[5])
-	if err != nil {
-		p.conn.Close()
-		return fmt.Errorf("Invalid destination port: %s", parts[5])
-	}
-	p.dstAddr = &net.TCPAddr{IP: ip, Port: port}
-
-	return nil
-}
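To make the parsing logic in checkPrefix() concrete, here is a tiny standalone illustration — not part of the patch, with invented addresses and ports — of the single CRLF-terminated header line the parser consumes before handing the rest of the stream to the caller:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "PROXY" <inet proto> <src ip> <dst ip> <src port> <dst port>, CRLF-terminated.
	header := "PROXY TCP4 203.0.113.7 10.0.0.5 56324 443\r\n"

	// checkPrefix() strips the trailing CRLF and splits on spaces,
	// expecting exactly six fields.
	parts := strings.Split(strings.TrimSuffix(header, "\r\n"), " ")
	fmt.Println(len(parts), parts) // 6 [PROXY TCP4 203.0.113.7 10.0.0.5 56324 443]
}
```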
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE b/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE
deleted file mode 100644
index 761798c3b..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2016, Circonus, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-    * Neither the name Circonus, Inc. nor the names
-      of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written
-      permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md
deleted file mode 100644
index d245e40af..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/README.md
+++ /dev/null
@@ -1,181 +0,0 @@
-# Circonus metrics tracking for Go applications
-
-This library supports named counters, gauges and histograms.
-It also provides convenience wrappers for registering latency
-instrumented functions with Go's builtin http server.
-
-Initializing only requires setting an ApiToken.
-
-## Example
-
-**rough and simple**
-
-```go
-package main
-
-import (
-	"log"
-	"math/rand"
-	"os"
-	"time"
-
-	cgm "github.com/circonus-labs/circonus-gometrics"
-)
-
-func main() {
-
-	log.Println("Configuring cgm")
-
-	cmc := &cgm.Config{}
-
-	// Interval at which metrics are submitted to Circonus, default: 10 seconds
-	cmc.Interval = "10s" // 10 seconds
-	// Enable debug messages, default: false
-	cmc.Debug = false
-	// Send debug messages to specific log.Logger instance
-	// default: if debug stderr, else, discard
-	//cmc.CheckManager.Log = ...
- - // Circonus API configuration options - // - // Token, no default (blank disables check manager) - cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") - // App name, default: circonus-gometrics - cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") - // URL, default: https://api.circonus.com/v2 - cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") - - // Check configuration options - // - // precedence 1 - explicit submission_url - // precedence 2 - specific check id (note: not a check bundle id) - // precedence 3 - search using instanceId and searchTag - // otherwise: if an applicable check is NOT specified or found, an - // attempt will be made to automatically create one - // - // Pre-existing httptrap check submission_url - cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL") - // Pre-existing httptrap check id (check not check bundle) - cmc.CheckManager.Check.ID = "" - // if neither a submission url nor check id are provided, an attempt will be made to find an existing - // httptrap check by using the circonus api to search for a check matching the following criteria: - // an active check, - // of type httptrap, - // where the target/host is equal to InstanceId - see below - // and the check has a tag equal to SearchTag - see below - // Instance ID - an identifier for the 'group of metrics emitted by this process or service' - // this is used as the value for check.target (aka host) - // default: 'hostname':'program name' - // note: for a persistent instance that is ephemeral or transient where metric continuity is - // desired set this explicitly so that the current hostname will not be used. - cmc.CheckManager.Check.InstanceID = "" - // Search tag - a specific tag which, when coupled with the instanceId serves to identify the - // origin and/or grouping of the metrics - // default: service:application name (e.g. service:consul) - cmc.CheckManager.Check.SearchTag = "" - // Check secret, default: generated when a check needs to be created - cmc.CheckManager.Check.Secret = "" - // Check tags, array of strings, additional tags to add to a new check, default: none - //cmc.CheckManager.Check.Tags = []string{"category:tagname"} - // max amount of time to to hold on to a submission url - // when a given submission fails (due to retries) if the - // time the url was last updated is > than this, the trap - // url will be refreshed (e.g. if the broker is changed - // in the UI) default 5 minutes - cmc.CheckManager.Check.MaxURLAge = "5m" - // custom display name for check, default: "InstanceId /cgm" - cmc.CheckManager.Check.DisplayName = "" - // force metric activation - if a metric has been disabled via the UI - // the default behavior is to *not* re-activate the metric; this setting - // overrides the behavior and will re-activate the metric when it is - // encountered. "(true|false)", default "false" - cmc.CheckManager.Check.ForceMetricActivation = "false" - - // Broker configuration options - // - // Broker ID of specific broker to use, default: random enterprise broker or - // Circonus default if no enterprise brokers are available. - // default: only used if set - cmc.CheckManager.Broker.ID = "" - // used to select a broker with the same tag (e.g. can be used to dictate that a broker - // serving a specific location should be used. 
"dc:sfo", "location:new_york", "zone:us-west") - // if more than one broker has the tag, one will be selected randomly from the resulting list - // default: not used unless != "" - cmc.CheckManager.Broker.SelectTag = "" - // longest time to wait for a broker connection (if latency is > the broker will - // be considered invalid and not available for selection.), default: 500 milliseconds - cmc.CheckManager.Broker.MaxResponseTime = "500ms" - // if broker Id or SelectTag are not specified, a broker will be selected randomly - // from the list of brokers available to the api token. enterprise brokers take precedence - // viable brokers are "active", have the "httptrap" module enabled, are reachable and respond - // within MaxResponseTime. - - log.Println("Creating new cgm instance") - - metrics, err := cgm.NewCirconusMetrics(cmc) - if err != nil { - panic(err) - } - - src := rand.NewSource(time.Now().UnixNano()) - rnd := rand.New(src) - - log.Println("Starting cgm internal auto-flush timer") - metrics.Start() - - log.Println("Starting to send metrics") - - // number of "sets" of metrics to send (a minute worth) - max := 60 - - for i := 1; i < max; i++ { - log.Printf("\tmetric set %d of %d", i, 60) - metrics.Timing("ding", rnd.Float64()*10) - metrics.Increment("dong") - metrics.Gauge("dang", 10) - time.Sleep(1000 * time.Millisecond) - } - - log.Println("Flushing any outstanding metrics manually") - metrics.Flush() - -} -``` - -### HTTP Handler wrapping - -``` -http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func)) -``` - -### HTTP latency example - -``` -package main - -import ( - "os" - "fmt" - "net/http" - cgm "github.com/circonus-labs/circonus-gometrics" -) - -func main() { - cmc := &cgm.Config{} - cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") - - metrics, err := cgm.NewCirconusMetrics(cmc) - if err != nil { - panic(err) - } - metrics.Start() - - http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:]) - })) - http.ListenAndServe(":8080", http.DefaultServeMux) -} - -``` - -Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go deleted file mode 100644 index 7a7f18708..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package api provides methods for interacting with the Circonus API -package api - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/hashicorp/go-retryablehttp" -) - -const ( - // a few sensible defaults - defaultAPIURL = "https://api.circonus.com/v2" - defaultAPIApp = "circonus-gometrics" - minRetryWait = 10 * time.Millisecond - maxRetryWait = 50 * time.Millisecond - maxRetries = 3 -) - -// TokenKeyType - Circonus API Token key -type TokenKeyType string - -// TokenAppType - Circonus API Token app name -type TokenAppType string - -// IDType Circonus object id (numeric portion of cid) -type IDType int - -// CIDType Circonus object cid -type CIDType string - -// URLType submission url type -type URLType string - -// SearchQueryType search query -type SearchQueryType string - -// SearchTagType search/select tag type -type SearchTagType string - -// Config options for Circonus API -type Config struct { - URL string - TokenKey string - TokenApp string - Log *log.Logger - Debug bool -} - -// API Circonus API -type API struct { - apiURL *url.URL - key TokenKeyType - app TokenAppType - Debug bool - Log *log.Logger -} - -// NewAPI returns a new Circonus API -func NewAPI(ac *Config) (*API, error) { - - if ac == nil { - return nil, errors.New("Invalid API configuration (nil)") - } - - key := TokenKeyType(ac.TokenKey) - if key == "" { - return nil, errors.New("API Token is required") - } - - app := TokenAppType(ac.TokenApp) - if app == "" { - app = defaultAPIApp - } - - au := string(ac.URL) - if au == "" { - au = defaultAPIURL - } - if !strings.Contains(au, "/") { - // if just a hostname is passed, ASSume "https" and a path prefix of "/v2" - au = fmt.Sprintf("https://%s/v2", ac.URL) - } - if last := len(au) - 1; last >= 0 && au[last] == '/' { - au = au[:last] - } - apiURL, err := url.Parse(au) - if err != nil { - return nil, err - } - - a := &API{apiURL, key, app, ac.Debug, ac.Log} - - if a.Log == nil { - if a.Debug { - a.Log = log.New(os.Stderr, "", log.LstdFlags) - } else { - a.Log = log.New(ioutil.Discard, "", log.LstdFlags) - } - } - - return a, nil -} - -// Get API request -func (a *API) Get(reqPath string) ([]byte, error) { - return a.apiCall("GET", reqPath, nil) -} - -// Delete API request -func (a *API) Delete(reqPath string) ([]byte, error) { - return a.apiCall("DELETE", reqPath, nil) -} - -// Post API request -func (a *API) Post(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("POST", reqPath, data) -} - -// Put API request -func (a *API) Put(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("PUT", reqPath, data) -} - -// apiCall call Circonus API -func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) { - dataReader := bytes.NewReader(data) - reqURL := a.apiURL.String() - - if reqPath[:1] != "/" { - reqURL += "/" - } - if reqPath[:3] == "/v2" { - reqURL += reqPath[3:len(reqPath)] - } else { - reqURL += reqPath - } - - req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) - if err != nil { - return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) - } - req.Header.Add("Accept", "application/json") - req.Header.Add("X-Circonus-Auth-Token", string(a.key)) - req.Header.Add("X-Circonus-App-Name", string(a.app)) - - // keep last HTTP error in the event of retry failure - var lastHTTPError error - retryPolicy := func(resp *http.Response, err error) (bool, error) { - if err != nil { - lastHTTPError = err - return true, 
err - } - // Check the response code. We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. This will catch - // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || resp.StatusCode >= 500 { - defer resp.Body.Close() - body, readErr := ioutil.ReadAll(resp.Body) - if readErr != nil { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr) - } else { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body)) - } - return true, nil - } - return false, nil - } - - client := retryablehttp.NewClient() - client.RetryWaitMin = minRetryWait - client.RetryWaitMax = maxRetryWait - client.RetryMax = maxRetries - client.Logger = a.Log - client.CheckRetry = retryPolicy - - resp, err := client.Do(req) - if err != nil { - if lastHTTPError != nil { - return nil, fmt.Errorf("[ERROR] fetching: %+v %+v", err, lastHTTPError) - } - return nil, fmt.Errorf("[ERROR] fetching %s: %+v", reqURL, err) - } - - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("[ERROR] reading body %+v", err) - } - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - msg := fmt.Sprintf("API response code %d: %s", resp.StatusCode, string(body)) - if a.Debug { - a.Log.Printf("[DEBUG] %s\n", msg) - } - - return nil, fmt.Errorf("[ERROR] %s", msg) - } - - return body, nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go deleted file mode 100644 index 87e2b5e37..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package api - -import ( - "encoding/json" - "fmt" -) - -// BrokerDetail instance attributes -type BrokerDetail struct { - CN string `json:"cn"` - IP string `json:"ipaddress"` - MinVer int `json:"minimum_version_required"` - Modules []string `json:"modules"` - Port int `json:"port"` - Skew string `json:"skew"` - Status string `json:"status"` - Version int `json:"version"` -} - -// Broker definition -type Broker struct { - Cid string `json:"_cid"` - Details []BrokerDetail `json:"_details"` - Latitude string `json:"_latitude"` - Longitude string `json:"_longitude"` - Name string `json:"_name"` - Tags []string `json:"_tags"` - Type string `json:"_type"` -} - -// FetchBrokerByID fetch a broker configuration by [group]id -func (a *API) FetchBrokerByID(id IDType) (*Broker, error) { - cid := CIDType(fmt.Sprintf("/broker/%d", id)) - return a.FetchBrokerByCID(cid) -} - -// FetchBrokerByCID fetch a broker configuration by cid -func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { - result, err := a.Get(string(cid)) - if err != nil { - return nil, err - } - - response := new(Broker) - if err := json.Unmarshal(result, &response); err != nil { - return nil, err - } - - return response, nil - -} - -// FetchBrokerListByTag return list of brokers with a specific tag -func (a *API) FetchBrokerListByTag(searchTag SearchTagType) ([]Broker, error) { - query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", searchTag)) - return a.BrokerSearch(query) -} - -// BrokerSearch return a list of brokers matching a query/filter -func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) { - queryURL := fmt.Sprintf("/broker?%s", string(query)) - - result, err := a.Get(queryURL) - if err != nil { - return nil, err - } - - var brokers []Broker - json.Unmarshal(result, &brokers) - - return brokers, nil -} - -// FetchBrokerList return list of all brokers available to the api token/app -func (a *API) FetchBrokerList() ([]Broker, error) { - result, err := a.Get("/broker") - if err != nil { - return nil, err - } - - var response []Broker - json.Unmarshal(result, &response) - - return response, nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go deleted file mode 100644 index df0524f43..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package api - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" -) - -// CheckDetails is an arbitrary json structure, we would only care about submission_url -type CheckDetails struct { - SubmissionURL string `json:"submission_url"` -} - -// Check definition -type Check struct { - Cid string `json:"_cid"` - Active bool `json:"_active"` - BrokerCid string `json:"_broker"` - CheckBundleCid string `json:"_check_bundle"` - CheckUUID string `json:"_check_uuid"` - Details CheckDetails `json:"_details"` -} - -// FetchCheckByID fetch a check configuration by id -func (a *API) FetchCheckByID(id IDType) (*Check, error) { - cid := CIDType(fmt.Sprintf("/check/%d", int(id))) - return a.FetchCheckByCID(cid) -} - -// FetchCheckByCID fetch a check configuration by cid -func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) { - result, err := a.Get(string(cid)) - if err != nil { - return nil, err - } - - check := new(Check) - json.Unmarshal(result, check) - - return check, nil -} - -// FetchCheckBySubmissionURL fetch a check configuration by submission_url -func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) { - - u, err := url.Parse(string(submissionURL)) - if err != nil { - return nil, err - } - - // valid trap url: scheme://host[:port]/module/httptrap/UUID/secret - - // does it smell like a valid trap url path - if u.Path[:17] != "/module/httptrap/" { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL) - } - - // extract uuid/secret - pathParts := strings.Split(u.Path[17:len(u.Path)], "/") - if len(pathParts) != 2 { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL) - } - - uuid := pathParts[0] - - query := SearchQueryType(fmt.Sprintf("f__check_uuid=%s", uuid)) - - checks, err := a.CheckSearch(query) - if err != nil { - return nil, err - } - - if len(checks) == 0 { - return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid) - } - - numActive := 0 - checkID := -1 - - for idx, check := range checks { - if check.Active { - numActive++ - checkID = idx - } - } - - if numActive > 1 { - return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid) - } - - return &checks[checkID], nil - -} - -// CheckSearch returns a list of checks matching a query/filter -func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) { - queryURL := fmt.Sprintf("/check?%s", string(query)) - - result, err := a.Get(queryURL) - if err != nil { - return nil, err - } - - var checks []Check - json.Unmarshal(result, &checks) - - return checks, nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go deleted file mode 100644 index 559df7ac0..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package api - -import ( - "encoding/json" - "fmt" -) - -// CheckBundleConfig configuration specific to check type -type CheckBundleConfig struct { - AsyncMetrics bool `json:"async_metrics"` - Secret string `json:"secret"` - SubmissionURL string `json:"submission_url"` -} - -// CheckBundleMetric individual metric configuration -type CheckBundleMetric struct { - Name string `json:"name"` - Type string `json:"type"` - Units string `json:"units"` - Status string `json:"status"` -} - -// CheckBundle definition -type CheckBundle struct { - CheckUUIDs []string `json:"_check_uuids,omitempty"` - Checks []string `json:"_checks,omitempty"` - Cid string `json:"_cid,omitempty"` - Created int `json:"_created,omitempty"` - LastModified int `json:"_last_modified,omitempty"` - LastModifedBy string `json:"_last_modifed_by,omitempty"` - ReverseConnectUrls []string `json:"_reverse_connection_urls,omitempty"` - Brokers []string `json:"brokers"` - Config CheckBundleConfig `json:"config"` - DisplayName string `json:"display_name"` - Metrics []CheckBundleMetric `json:"metrics"` - MetricLimit int `json:"metric_limit"` - Notes string `json:"notes"` - Period int `json:"period"` - Status string `json:"status"` - Tags []string `json:"tags"` - Target string `json:"target"` - Timeout int `json:"timeout"` - Type string `json:"type"` -} - -// FetchCheckBundleByID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) { - cid := CIDType(fmt.Sprintf("/check_bundle/%d", id)) - return a.FetchCheckBundleByCID(cid) -} - -// FetchCheckBundleByCID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) { - result, err := a.Get(string(cid)) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - json.Unmarshal(result, checkBundle) - - return checkBundle, nil -} - -// CheckBundleSearch returns list of check bundles matching a search query -// - a search query not a filter (see: https://login.circonus.com/resources/api#searching) -func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) { - apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria) - - response, err := a.Get(apiPath) - if err != nil { - return nil, fmt.Errorf("[ERROR] API call error %+v", err) - } - - var results []CheckBundle - err = json.Unmarshal(response, &results) - if err != nil { - return nil, fmt.Errorf("[ERROR] Parsing JSON response %+v", err) - } - - return results, nil -} - -// CreateCheckBundle create a new check bundle (check) -func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) { - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Post("/check_bundle", cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - err = json.Unmarshal(response, checkBundle) - if err != nil { - return nil, err - } - - return checkBundle, nil -} - -// UpdateCheckBundle updates a check bundle configuration -func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) { - if a.Debug { - a.Log.Printf("[DEBUG] Updating check bundle with new metrics.") - } - - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Put(config.Cid, cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - err = json.Unmarshal(response, checkBundle) - if err != nil { - return nil, err - } - - return checkBundle, nil -} diff --git 
a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go deleted file mode 100644 index b23486ce2..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package checkmgr - -import ( - "fmt" - "math/rand" - "net" - "net/url" - "reflect" - "strings" - "time" - - "github.com/circonus-labs/circonus-gometrics/api" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// Get Broker to use when creating a check -func (cm *CheckManager) getBroker() (*api.Broker, error) { - if cm.brokerID != 0 { - broker, err := cm.apih.FetchBrokerByID(cm.brokerID) - if err != nil { - return nil, err - } - if !cm.isValidBroker(broker) { - return nil, fmt.Errorf( - "[ERROR] designated broker %d [%s] is invalid (not active, does not support required check type, or connectivity issue)", - cm.brokerID, - broker.Name) - } - return broker, nil - } - broker, err := cm.selectBroker() - if err != nil { - return nil, fmt.Errorf("[ERROR] Unable to fetch suitable broker %s", err) - } - return broker, nil -} - -// Get CN of Broker associated with submission_url to satisfy no IP SANS in certs -func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLType) (string, error) { - u, err := url.Parse(string(submissionURL)) - if err != nil { - return "", err - } - - hostParts := strings.Split(u.Host, ":") - host := hostParts[0] - - if net.ParseIP(host) == nil { // it's a non-ip string - return u.Host, nil - } - - cn := "" - - for _, detail := range broker.Details { - if detail.IP == host { - cn = detail.CN - break - } - } - - if cn == "" { - return "", fmt.Errorf("[ERROR] Unable to match URL host (%s) to Broker", u.Host) - } - - return cn, nil - -} - -// Select a broker for use when creating a check, if a specific broker -// was not specified. 
-func (cm *CheckManager) selectBroker() (*api.Broker, error) { - var brokerList []api.Broker - var err error - - if cm.brokerSelectTag != "" { - brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag) - if err != nil { - return nil, err - } - } else { - brokerList, err = cm.apih.FetchBrokerList() - if err != nil { - return nil, err - } - } - - if len(brokerList) == 0 { - return nil, fmt.Errorf("zero brokers found") - } - - validBrokers := make(map[string]api.Broker) - haveEnterprise := false - - for _, broker := range brokerList { - if cm.isValidBroker(&broker) { - validBrokers[broker.Cid] = broker - if broker.Type == "enterprise" { - haveEnterprise = true - } - } - } - - if haveEnterprise { // eliminate non-enterprise brokers from valid brokers - for k, v := range validBrokers { - if v.Type != "enterprise" { - delete(validBrokers, k) - } - } - } - - if len(validBrokers) == 0 { - return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList)) - } - - validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys() - selectedBroker := validBrokers[validBrokerKeys[rand.Intn(len(validBrokerKeys))].String()] - - if cm.Debug { - cm.Log.Printf("[DEBUG] Selected broker '%s'\n", selectedBroker.Name) - } - - return &selectedBroker, nil - -} - -// Verify broker supports the check type to be used -func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool { - - for _, module := range details.Modules { - if CheckTypeType(module) == checkType { - return true - } - } - - return false - -} - -// Is the broker valid (active, supports check type, and reachable) -func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { - brokerPort := 0 - valid := false - for _, detail := range broker.Details { - brokerPort = 43191 - - // broker must be active - if detail.Status != statusActive { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' is not active.\n", broker.Name) - } - continue - } - - // broker must have module loaded for the check type to be used - if !cm.brokerSupportsCheckType(cm.checkType, &detail) { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' does not support '%s' checks.\n", broker.Name, cm.checkType) - } - continue - } - - // broker must be reachable and respond within designated time - conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.IP, brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if detail.CN != "trap.noit.circonus.net" { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err) - } - continue // not able to reach the broker (or respone slow enough for it to be considered not usable) - } - // if circonus trap broker, try port 443 - brokerPort = 443 - conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.CN, brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect %v\n", broker.Name, err) - } - continue // not able to reach the broker on 443 either (or respone slow enough for it to be considered not usable) - } - } - conn.Close() - - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name) - } - - valid = true - break - - } - return valid -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go deleted file mode 100644 index 3f173bf5c..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go +++ /dev/null @@ -1,87 +0,0 @@ -// 
Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package checkmgr - -import ( - "crypto/x509" - "encoding/json" - "fmt" -) - -// Default Circonus CA certificate -var circonusCA = []byte(`-----BEGIN CERTIFICATE----- -MIID4zCCA0ygAwIBAgIJAMelf8skwVWPMA0GCSqGSIb3DQEBBQUAMIGoMQswCQYD -VQQGEwJVUzERMA8GA1UECBMITWFyeWxhbmQxETAPBgNVBAcTCENvbHVtYmlhMRcw -FQYDVQQKEw5DaXJjb251cywgSW5jLjERMA8GA1UECxMIQ2lyY29udXMxJzAlBgNV -BAMTHkNpcmNvbnVzIENlcnRpZmljYXRlIEF1dGhvcml0eTEeMBwGCSqGSIb3DQEJ -ARYPY2FAY2lyY29udXMubmV0MB4XDTA5MTIyMzE5MTcwNloXDTE5MTIyMTE5MTcw -NlowgagxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMI -Q29sdW1iaWExFzAVBgNVBAoTDkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJj -b251czEnMCUGA1UEAxMeQ2lyY29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4w -HAYJKoZIhvcNAQkBFg9jYUBjaXJjb251cy5uZXQwgZ8wDQYJKoZIhvcNAQEBBQAD -gY0AMIGJAoGBAKz2X0/0vJJ4ad1roehFyxUXHdkjJA9msEKwT2ojummdUB3kK5z6 -PDzDL9/c65eFYWqrQWVWZSLQK1D+v9xJThCe93v6QkSJa7GZkCq9dxClXVtBmZH3 -hNIZZKVC6JMA9dpRjBmlFgNuIdN7q5aJsv8VZHH+QrAyr9aQmhDJAmk1AgMBAAGj -ggERMIIBDTAdBgNVHQ4EFgQUyNTsgZHSkhhDJ5i+6IFlPzKYxsUwgd0GA1UdIwSB -1TCB0oAUyNTsgZHSkhhDJ5i+6IFlPzKYxsWhga6kgaswgagxCzAJBgNVBAYTAlVT -MREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQ29sdW1iaWExFzAVBgNVBAoT -DkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJjb251czEnMCUGA1UEAxMeQ2ly -Y29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9jYUBj -aXJjb251cy5uZXSCCQDHpX/LJMFVjzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB -BQUAA4GBAAHBtl15BwbSyq0dMEBpEdQYhHianU/rvOMe57digBmox7ZkPEbB/baE -sYJysziA2raOtRxVRtcxuZSMij2RiJDsLxzIp1H60Xhr8lmf7qF6Y+sZl7V36KZb -n2ezaOoRtsQl9dhqEMe8zgL76p9YZ5E69Al0mgiifTteyNjjMuIW ------END CERTIFICATE-----`) - -// CACert contains cert returned from Circonus API -type CACert struct { - Contents string `json:"contents"` -} - -// loadCACert loads the CA cert for the broker designated by the submission url -func (cm *CheckManager) loadCACert() { - if cm.certPool != nil { - return - } - - cm.certPool = x509.NewCertPool() - - cert, err := cm.fetchCert() - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err) - } - } - - if cert == nil { - cert = circonusCA - } - - cm.certPool.AppendCertsFromPEM(cert) -} - -// fetchCert fetches CA certificate using Circonus API -func (cm *CheckManager) fetchCert() ([]byte, error) { - if !cm.enabled { - return circonusCA, nil - } - - response, err := cm.apih.Get("/pki/ca.crt") - if err != nil { - return nil, err - } - - cadata := new(CACert) - err = json.Unmarshal(response, cadata) - if err != nil { - return nil, err - } - - if cadata.Contents == "" { - return nil, fmt.Errorf("[ERROR] Unable to find ca cert %+v", cadata) - } - - return []byte(cadata.Contents), nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go deleted file mode 100644 index 56f6fed28..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package checkmgr - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/circonus-labs/circonus-gometrics/api" -) - -// Initialize CirconusMetrics instance. Attempt to find a check otherwise create one. 
-// use cases: -// -// check [bundle] by submission url -// check [bundle] by *check* id (note, not check_bundle id) -// check [bundle] by search -// create check [bundle] -func (cm *CheckManager) initializeTrapURL() error { - if cm.trapURL != "" { - return nil - } - - cm.trapmu.Lock() - defer cm.trapmu.Unlock() - - if cm.checkSubmissionURL != "" { - if !cm.enabled { - cm.trapURL = cm.checkSubmissionURL - cm.trapLastUpdate = time.Now() - return nil - } - } - - if !cm.enabled { - return errors.New("Unable to initialize trap, check manager is disabled.") - } - - var err error - var check *api.Check - var checkBundle *api.CheckBundle - var broker *api.Broker - - if cm.checkSubmissionURL != "" { - check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL) - if err != nil { - return err - } - // extract check id from check object returned from looking up using submission url - // set m.CheckId to the id - // set m.SubmissionUrl to "" to prevent trying to search on it going forward - // use case: if the broker is changed in the UI metrics would stop flowing - // unless the new submission url can be fetched with the API (which is no - // longer possible using the original submission url) - var id int - id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1)) - if err == nil { - cm.checkID = api.IDType(id) - cm.checkSubmissionURL = "" - } else { - cm.Log.Printf( - "[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n", - check.Cid, err) - } - } else if cm.checkID > 0 { - check, err = cm.apih.FetchCheckByID(cm.checkID) - if err != nil { - return err - } - } else { - searchCriteria := fmt.Sprintf( - "(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)", - cm.checkInstanceID, cm.checkType, cm.checkSearchTag) - checkBundle, err = cm.checkBundleSearch(searchCriteria) - if err != nil { - return err - } - - if checkBundle == nil { - // err==nil && checkBundle==nil is "no check bundles matched" - // an error *should* be returned for any other invalid scenario - checkBundle, broker, err = cm.createNewCheck() - if err != nil { - return err - } - } - } - - if checkBundle == nil { - if check != nil { - checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid)) - if err != nil { - return err - } - } else { - return fmt.Errorf("[ERROR] Unable to retrieve, find, or create check") - } - } - - if broker == nil { - broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0])) - if err != nil { - return err - } - } - - // retain to facilitate metric management (adding new metrics specifically) - cm.checkBundle = checkBundle - cm.inventoryMetrics() - - // url to which metrics should be PUT - cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL) - - // used when sending as "ServerName" get around certs not having IP SANS - // (cert created with server name as CN but IP used in trap url) - cn, err := cm.getBrokerCN(broker, cm.trapURL) - if err != nil { - return err - } - cm.trapCN = BrokerCNType(cn) - - cm.trapLastUpdate = time.Now() - - return nil -} - -// Search for a check bundle given a predetermined set of criteria -func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) { - checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria)) - if err != nil { - return nil, err - } - - if len(checkBundles) == 0 { - return nil, nil // trigger creation of a new check - } - - numActive := 0 - checkID := -1 - - for idx, check := range checkBundles { - if check.Status == statusActive { - numActive++ - 
checkID = idx
-		}
-	}
-
-	if numActive > 1 {
-		return nil, fmt.Errorf("[ERROR] Multiple check bundles match criteria %s\n", criteria)
-	}
-
-	return &checkBundles[checkID], nil
-}
-
-// Create a new check to receive metrics
-func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) {
-	checkSecret := string(cm.checkSecret)
-	if checkSecret == "" {
-		secret, err := cm.makeSecret()
-		if err != nil {
-			secret = "myS3cr3t"
-		}
-		checkSecret = secret
-	}
-
-	broker, err := cm.getBroker()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	config := api.CheckBundle{
-		Brokers:     []string{broker.Cid},
-		Config:      api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},
-		DisplayName: string(cm.checkDisplayName),
-		Metrics:     []api.CheckBundleMetric{},
-		MetricLimit: 0,
-		Notes:       "",
-		Period:      60,
-		Status:      statusActive,
-		Tags:        append([]string{string(cm.checkSearchTag)}, cm.checkTags...),
-		Target:      string(cm.checkInstanceID),
-		Timeout:     10,
-		Type:        string(cm.checkType),
-	}
-
-	checkBundle, err := cm.apih.CreateCheckBundle(config)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return checkBundle, broker, nil
-}
-
-// Create a dynamic secret to use with a new check
-func (cm *CheckManager) makeSecret() (string, error) {
-	hash := sha256.New()
-	x := make([]byte, 2048)
-	if _, err := rand.Read(x); err != nil {
-		return "", err
-	}
-	hash.Write(x)
-	return hex.EncodeToString(hash.Sum(nil))[0:16], nil
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
deleted file mode 100644
index e23f00a57..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package checkmgr provides a check management interface to circonus-gometrics
-package checkmgr
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net/url"
-	"os"
-	"path"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/circonus-labs/circonus-gometrics/api"
-)
-
-// Check management offers:
-//
-// Create a check if one cannot be found matching specific criteria
-// Manage metrics in the supplied check (enabling new metrics as they are submitted)
-//
-// To disable check management, leave Config.Api.Token.Key blank
-//
-// use cases:
-// configure without api token - check management disabled
-//  - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored
-//  - note: SubmissionUrl is **required** in this case as there is no way to derive w/o api
-// configure with api token - check management enabled
-//  - all other configuration parameters affect how the trap url is obtained
-//    1. provided (Check.SubmissionUrl)
-//    2. via check lookup (CheckConfig.Id)
-//    3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag
-//    4. 
a new check is created
-
-const (
-	defaultCheckType             = "httptrap"
-	defaultTrapMaxURLAge         = "60s"   // 60 seconds
-	defaultBrokerMaxResponseTime = "500ms" // 500 milliseconds
-	defaultForceMetricActivation = "false"
-	statusActive                 = "active"
-)
-
-// CheckConfig options for check
-type CheckConfig struct {
-	// a specific submission url
-	SubmissionURL string
-	// a specific check id (not check bundle id)
-	ID string
-	// unique instance id string
-	// used to search for a check to use
-	// used as check.target when creating a check
-	InstanceID string
-	// unique check searching tag
-	// used to search for a check to use (combined with instanceid)
-	// used as a regular tag when creating a check
-	SearchTag string
-	// a custom display name for the check (as viewed in UI Checks)
-	DisplayName string
-	// httptrap check secret (for creating a check)
-	Secret string
-	// additional tags to add to a check (when creating a check)
-	// these tags will not be added to an existing check
-	Tags []string
-	// max amount of time to hold on to a submission url
-	// when a given submission fails (due to retries) if the
-	// time the url was last updated is > than this, the trap
-	// url will be refreshed (e.g. if the broker is changed
-	// in the UI) **only relevant when check management is enabled**
-	// e.g. 5m, 30m, 1h, etc.
-	MaxURLAge string
-	// force metric activation - if a metric has been disabled via the UI
-	// the default behavior is to *not* re-activate the metric; this setting
-	// overrides the behavior and will re-activate the metric when it is
-	// encountered. "(true|false)", default "false"
-	ForceMetricActivation string
-}
-
-// BrokerConfig options for broker
-type BrokerConfig struct {
-	// a specific broker id (numeric portion of cid)
-	ID string
-	// a tag that can be used to select 1-n brokers from which to select
-	// when creating a new check (e.g. datacenter:abc)
-	SelectTag string
-	// for a broker to be considered viable it must respond to a
-	// connection attempt within this amount of time e.g. 
200ms, 2s, 1m - MaxResponseTime string -} - -// Config options -type Config struct { - Log *log.Logger - Debug bool - - // Circonus API config - API api.Config - // Check specific configuration options - Check CheckConfig - // Broker specific configuration options - Broker BrokerConfig -} - -// CheckTypeType check type -type CheckTypeType string - -// CheckInstanceIDType check instance id -type CheckInstanceIDType string - -// CheckSecretType check secret -type CheckSecretType string - -// CheckTagsType check tags -type CheckTagsType []string - -// CheckDisplayNameType check display name -type CheckDisplayNameType string - -// BrokerCNType broker common name -type BrokerCNType string - -// CheckManager settings -type CheckManager struct { - enabled bool - Log *log.Logger - Debug bool - apih *api.API - - // check - checkType CheckTypeType - checkID api.IDType - checkInstanceID CheckInstanceIDType - checkSearchTag api.SearchTagType - checkSecret CheckSecretType - checkTags CheckTagsType - checkSubmissionURL api.URLType - checkDisplayName CheckDisplayNameType - forceMetricActivation bool - - // broker - brokerID api.IDType - brokerSelectTag api.SearchTagType - brokerMaxResponseTime time.Duration - - // state - checkBundle *api.CheckBundle - availableMetrics map[string]bool - trapURL api.URLType - trapCN BrokerCNType - trapLastUpdate time.Time - trapMaxURLAge time.Duration - trapmu sync.Mutex - certPool *x509.CertPool -} - -// Trap config -type Trap struct { - URL *url.URL - TLS *tls.Config -} - -// NewCheckManager returns a new check manager -func NewCheckManager(cfg *Config) (*CheckManager, error) { - - if cfg == nil { - return nil, errors.New("Invalid Check Manager configuration (nil).") - } - - cm := &CheckManager{ - enabled: false, - } - - cm.Debug = cfg.Debug - - cm.Log = cfg.Log - if cm.Log == nil { - if cm.Debug { - cm.Log = log.New(os.Stderr, "", log.LstdFlags) - } else { - cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) - } - } - - if cfg.Check.SubmissionURL != "" { - cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) - } - // Blank API Token *disables* check management - if cfg.API.TokenKey == "" { - if cm.checkSubmissionURL == "" { - return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).") - } - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - return cm, nil - } - - // enable check manager - - cm.enabled = true - - // initialize api handle - - cfg.API.Debug = cm.Debug - cfg.API.Log = cm.Log - - apih, err := api.NewAPI(&cfg.API) - if err != nil { - return nil, err - } - cm.apih = apih - - // initialize check related data - - cm.checkType = defaultCheckType - - idSetting := "0" - if cfg.Check.ID != "" { - idSetting = cfg.Check.ID - } - id, err := strconv.Atoi(idSetting) - if err != nil { - return nil, err - } - cm.checkID = api.IDType(id) - - cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID) - cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName) - cm.checkSearchTag = api.SearchTagType(cfg.Check.SearchTag) - cm.checkSecret = CheckSecretType(cfg.Check.Secret) - cm.checkTags = cfg.Check.Tags - - fma := defaultForceMetricActivation - if cfg.Check.ForceMetricActivation != "" { - fma = cfg.Check.ForceMetricActivation - } - fm, err := strconv.ParseBool(fma) - if err != nil { - return nil, err - } - cm.forceMetricActivation = fm - - _, an := path.Split(os.Args[0]) - hn, err := os.Hostname() - if err != nil { - hn = "unknown" - } - if cm.checkInstanceID == "" { - cm.checkInstanceID = 
CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an)) - } - - if cm.checkSearchTag == "" { - cm.checkSearchTag = api.SearchTagType(fmt.Sprintf("service:%s", an)) - } - - if cm.checkDisplayName == "" { - cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s /cgm", string(cm.checkInstanceID))) - } - - dur := cfg.Check.MaxURLAge - if dur == "" { - dur = defaultTrapMaxURLAge - } - maxDur, err := time.ParseDuration(dur) - if err != nil { - return nil, err - } - cm.trapMaxURLAge = maxDur - - // setup broker - - idSetting = "0" - if cfg.Broker.ID != "" { - idSetting = cfg.Broker.ID - } - id, err = strconv.Atoi(idSetting) - if err != nil { - return nil, err - } - cm.brokerID = api.IDType(id) - - cm.brokerSelectTag = api.SearchTagType(cfg.Broker.SelectTag) - - dur = cfg.Broker.MaxResponseTime - if dur == "" { - dur = defaultBrokerMaxResponseTime - } - maxDur, err = time.ParseDuration(dur) - if err != nil { - return nil, err - } - cm.brokerMaxResponseTime = maxDur - - // metrics - cm.availableMetrics = make(map[string]bool) - - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - - return cm, nil -} - -// GetTrap return the trap url -func (cm *CheckManager) GetTrap() (*Trap, error) { - if cm.trapURL == "" { - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - } - - trap := &Trap{} - - u, err := url.Parse(string(cm.trapURL)) - if err != nil { - return nil, err - } - - trap.URL = u - - if u.Scheme == "https" { - if cm.certPool == nil { - cm.loadCACert() - } - t := &tls.Config{ - RootCAs: cm.certPool, - } - if cm.trapCN != "" { - t.ServerName = string(cm.trapCN) - } - trap.TLS = t - } - - return trap, nil -} - -// ResetTrap URL, force request to the API for the submission URL and broker ca cert -func (cm *CheckManager) ResetTrap() error { - if cm.trapURL == "" { - return nil - } - - cm.trapURL = "" - cm.certPool = nil - err := cm.initializeTrapURL() - return err -} - -// RefreshTrap check when the last time the URL was reset, reset if needed -func (cm *CheckManager) RefreshTrap() { - if cm.trapURL == "" { - return - } - - if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge { - cm.ResetTrap() - } -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go deleted file mode 100644 index 8f03d4476..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package checkmgr
-
-import (
-	"github.com/circonus-labs/circonus-gometrics/api"
-)
-
-// IsMetricActive checks whether a given metric name is currently active (enabled)
func (cm *CheckManager) IsMetricActive(name string) bool {
-	active, _ := cm.availableMetrics[name]
-	return active
-}
-
-// ActivateMetric determines if a given metric should be activated
-func (cm *CheckManager) ActivateMetric(name string) bool {
-	active, exists := cm.availableMetrics[name]
-
-	if !exists {
-		return true
-	}
-
-	if !active && cm.forceMetricActivation {
-		return true
-	}
-
-	return false
-}
-
-// AddNewMetrics updates a check bundle with new metrics
-func (cm *CheckManager) AddNewMetrics(newMetrics map[string]*api.CheckBundleMetric) {
-	// only if check manager is enabled
-	if !cm.enabled {
-		return
-	}
-
-	// only if checkBundle has been populated
-	if cm.checkBundle == nil {
-		return
-	}
-
-	newCheckBundle := cm.checkBundle
-	numCurrMetrics := len(newCheckBundle.Metrics)
-	numNewMetrics := len(newMetrics)
-
-	if numCurrMetrics+numNewMetrics >= cap(newCheckBundle.Metrics) {
-		nm := make([]api.CheckBundleMetric, numCurrMetrics+numNewMetrics)
-		copy(nm, newCheckBundle.Metrics)
-		newCheckBundle.Metrics = nm
-	}
-
-	newCheckBundle.Metrics = newCheckBundle.Metrics[0 : numCurrMetrics+numNewMetrics]
-
-	i := 0
-	for _, metric := range newMetrics {
-		newCheckBundle.Metrics[numCurrMetrics+i] = *metric
-		i++
-	}
-
-	checkBundle, err := cm.apih.UpdateCheckBundle(newCheckBundle)
-	if err != nil {
-		cm.Log.Printf("[ERROR] updating check bundle with new metrics %v", err)
-		return
-	}
-
-	cm.checkBundle = checkBundle
-	cm.inventoryMetrics()
-}
-
-// inventoryMetrics creates list of active metrics in check bundle
-func (cm *CheckManager) inventoryMetrics() {
-	availableMetrics := make(map[string]bool)
-	for _, metric := range cm.checkBundle.Metrics {
-		availableMetrics[metric.Name] = metric.Status == "active"
-	}
-	cm.availableMetrics = availableMetrics
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
deleted file mode 100644
index 34166c007..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package circonusgometrics provides instrumentation for your applications in the form
-// of counters, gauges and histograms and allows you to publish them to
-// Circonus
-//
-// Counters
-//
-// A counter is a monotonically-increasing, unsigned, 64-bit integer used to
-// represent the number of times an event has occurred. By tracking the deltas
-// between measurements of a counter over intervals of time, an aggregation
-// layer can derive rates, acceleration, etc.
-//
-// Gauges
-//
-// A gauge returns instantaneous measurements of something using signed, 64-bit
-// integers. This value does not need to be monotonic.
-//
-// Histograms
-//
-// A histogram tracks the distribution of a stream of values (e.g. the number of
-// seconds it takes to handle requests). Circonus can calculate complex
-// analytics on these.
-//
-// Reporting
-//
-// A periodic push to a Circonus httptrap is configurable. 
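Given the package overview above, here is a minimal usage sketch of the three metric kinds, assuming a pre-existing httptrap check; the submission URL is a placeholder, and, per the checkmgr notes above, a blank API token leaves check management disabled:

```go
package main

import (
	"time"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cfg := &cgm.Config{}
	// Placeholder URL of an existing httptrap check. With no API token,
	// check management is disabled and this URL is required.
	cfg.CheckManager.Check.SubmissionURL = "https://trap.example.com/module/httptrap/s3cr3t"

	m, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		panic(err)
	}

	m.Increment("requests")           // counter: monotonically increasing
	m.SetGauge("queue_depth", 42)     // gauge: instantaneous, may rise and fall
	m.Timing("handler_seconds", 0.01) // histogram: one sample of a distribution

	m.Start()                    // flush on the configured interval (default 10s)
	time.Sleep(15 * time.Second) // let at least one flush run before exiting
}
```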
-package circonusgometrics
-
-import (
-	"errors"
-	"io/ioutil"
-	"log"
-	"os"
-	"sync"
-	"time"
-
-	"github.com/circonus-labs/circonus-gometrics/api"
-	"github.com/circonus-labs/circonus-gometrics/checkmgr"
-)
-
-const (
-	defaultFlushInterval = "10s" // 10 * time.Second
-)
-
-// Config options for circonus-gometrics
-type Config struct {
-	Log   *log.Logger
-	Debug bool
-
-	// API, Check and Broker configuration options
-	CheckManager checkmgr.Config
-
-	// how frequently to submit metrics to Circonus, default 10 seconds
-	Interval string
-}
-
-// CirconusMetrics state
-type CirconusMetrics struct {
-	Log           *log.Logger
-	Debug         bool
-	flushInterval time.Duration
-	flushing      bool
-	flushmu       sync.Mutex
-	check         *checkmgr.CheckManager
-
-	counters map[string]uint64
-	cm       sync.Mutex
-
-	counterFuncs map[string]func() uint64
-	cfm          sync.Mutex
-
-	gauges map[string]string
-	gm     sync.Mutex
-
-	gaugeFuncs map[string]func() int64
-	gfm        sync.Mutex
-
-	histograms map[string]*Histogram
-	hm         sync.Mutex
-
-	text map[string]string
-	tm   sync.Mutex
-
-	textFuncs map[string]func() string
-	tfm       sync.Mutex
-}
-
-// NewCirconusMetrics returns a CirconusMetrics instance
-func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
-
-	if cfg == nil {
-		return nil, errors.New("Invalid configuration (nil).")
-	}
-
-	cm := &CirconusMetrics{
-		counters:     make(map[string]uint64),
-		counterFuncs: make(map[string]func() uint64),
-		gauges:       make(map[string]string),
-		gaugeFuncs:   make(map[string]func() int64),
-		histograms:   make(map[string]*Histogram),
-		text:         make(map[string]string),
-		textFuncs:    make(map[string]func() string),
-	}
-
-	cm.Debug = cfg.Debug
-	if cm.Debug {
-		if cfg.Log == nil {
-			cm.Log = log.New(os.Stderr, "", log.LstdFlags)
-		} else {
-			cm.Log = cfg.Log
-		}
-	}
-	if cm.Log == nil {
-		cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
-	}
-
-	fi := defaultFlushInterval
-	if cfg.Interval != "" {
-		fi = cfg.Interval
-	}
-
-	dur, err := time.ParseDuration(fi)
-	if err != nil {
-		return nil, err
-	}
-	cm.flushInterval = dur
-
-	cfg.CheckManager.Debug = cm.Debug
-	cfg.CheckManager.Log = cm.Log
-
-	check, err := checkmgr.NewCheckManager(&cfg.CheckManager)
-	if err != nil {
-		return nil, err
-	}
-	cm.check = check
-
-	if _, err := cm.check.GetTrap(); err != nil {
-		return nil, err
-	}
-
-	return cm, nil
-}
-
-// Start initializes the CirconusMetrics instance based on
-// configuration settings and sets the httptrap check url to
-// which metrics should be sent. It then starts a periodic
-// submission process of all metrics collected. 
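The constructor above parses Interval with time.ParseDuration (falling back to 10s) and eagerly resolves the trap URL via GetTrap. A sketch of the API-token path, where the token source and instance ID are illustrative:

```go
package main

import (
	"log"
	"os"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func newMetrics() (*cgm.CirconusMetrics, error) {
	cfg := &cgm.Config{
		Interval: "30s", // parsed via time.ParseDuration; "" means the 10s default
		Log:      log.New(os.Stderr, "cgm ", log.LstdFlags),
	}
	// A non-empty token enables check management: the trap URL is then
	// provided, looked up, searched for, or created, in that order.
	cfg.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
	cfg.CheckManager.Check.InstanceID = "web01:myapp" // hypothetical search/target id

	return cgm.NewCirconusMetrics(cfg)
}

func main() {
	m, err := newMetrics()
	if err != nil {
		log.Fatal(err)
	}
	m.Start()
	select {} // block forever; flushes continue in the background
}
```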
-func (m *CirconusMetrics) Start() { - go func() { - for _ = range time.NewTicker(m.flushInterval).C { - m.Flush() - } - }() -} - -// Flush metrics kicks off the process of sending metrics to Circonus -func (m *CirconusMetrics) Flush() { - if m.flushing { - return - } - m.flushmu.Lock() - m.flushing = true - m.flushmu.Unlock() - - if m.Debug { - m.Log.Println("[DEBUG] Flushing metrics") - } - - // check for new metrics and enable them automatically - newMetrics := make(map[string]*api.CheckBundleMetric) - - counters, gauges, histograms, text := m.snapshot() - output := make(map[string]interface{}) - for name, value := range counters { - send := m.check.IsMetricActive(name) - if !send && m.check.ActivateMetric(name) { - send = true - newMetrics[name] = &api.CheckBundleMetric{ - Name: name, - Type: "numeric", - Status: "active", - } - } - if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } - } - } - - for name, value := range gauges { - send := m.check.IsMetricActive(name) - if !send && m.check.ActivateMetric(name) { - send = true - newMetrics[name] = &api.CheckBundleMetric{ - Name: name, - Type: "numeric", - Status: "active", - } - } - if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } - } - } - - for name, value := range histograms { - send := m.check.IsMetricActive(name) - if !send && m.check.ActivateMetric(name) { - send = true - newMetrics[name] = &api.CheckBundleMetric{ - Name: name, - Type: "histogram", - Status: "active", - } - } - if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value.DecStrings(), - } - } - } - - for name, value := range text { - send := m.check.IsMetricActive(name) - if !send && m.check.ActivateMetric(name) { - send = true - newMetrics[name] = &api.CheckBundleMetric{ - Name: name, - Type: "text", - Status: "active", - } - } - if send { - output[name] = map[string]interface{}{ - "_type": "s", - "_value": value, - } - } - } - - m.submit(output, newMetrics) - - m.flushmu.Lock() - m.flushing = false - m.flushmu.Unlock() -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go deleted file mode 100644 index 2b34961f1..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package circonusgometrics - -// A Counter is a monotonically increasing unsigned integer. -// -// Use a counter to derive rates (e.g., record total number of requests, derive -// requests per second). 
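The comment above notes that an aggregation layer derives rates from counter deltas; a toy, library-independent sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	prev, prevAt := uint64(1200), now.Add(-10*time.Second) // earlier sample
	curr, currAt := uint64(1450), now                      // current sample

	delta := curr - prev // counters are monotonic, so the delta is non-negative
	rate := float64(delta) / currAt.Sub(prevAt).Seconds()
	fmt.Printf("%.1f req/s\n", rate) // 25.0 req/s
}
```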
- -// Increment counter by 1 -func (m *CirconusMetrics) Increment(metric string) { - m.Add(metric, 1) -} - -// IncrementByValue updates counter by supplied value -func (m *CirconusMetrics) IncrementByValue(metric string, val uint64) { - m.Add(metric, val) -} - -// Set a counter to specific value -func (m *CirconusMetrics) Set(metric string, val uint64) { - m.cm.Lock() - defer m.cm.Unlock() - m.counters[metric] = val -} - -// Add updates counter by supplied value -func (m *CirconusMetrics) Add(metric string, val uint64) { - m.cm.Lock() - defer m.cm.Unlock() - m.counters[metric] += val -} - -// RemoveCounter removes the named counter -func (m *CirconusMetrics) RemoveCounter(metric string) { - m.cm.Lock() - defer m.cm.Unlock() - delete(m.counters, metric) -} - -// SetCounterFunc set counter to a function [called at flush interval] -func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) { - m.cfm.Lock() - defer m.cfm.Unlock() - m.counterFuncs[metric] = fn -} - -// RemoveCounterFunc removes the named counter function -func (m *CirconusMetrics) RemoveCounterFunc(metric string) { - m.cfm.Lock() - defer m.cfm.Unlock() - delete(m.counterFuncs, metric) -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go deleted file mode 100644 index b44236959..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package circonusgometrics - -// A Gauge is an instantaneous measurement of a value. -// -// Use a gauge to track metrics which increase and decrease (e.g., amount of -// free memory). 
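For values like free memory it is often easier to register a gauge function (SetGaugeFunc, defined below) that is sampled once per flush instead of being pushed on every change. A sketch using runtime.MemStats; the metric name is illustrative:

```go
package metricsutil

import (
	"runtime"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

// RegisterHeapIdleGauge samples idle heap bytes once per flush interval.
func RegisterHeapIdleGauge(m *cgm.CirconusMetrics) {
	m.SetGaugeFunc("heap_idle_bytes", func() int64 {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		return int64(ms.HeapIdle)
	})
}
```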
- -import ( - "fmt" -) - -// Gauge sets a gauge to a value -func (m *CirconusMetrics) Gauge(metric string, val interface{}) { - m.SetGauge(metric, val) -} - -// SetGauge sets a gauge to a value -func (m *CirconusMetrics) SetGauge(metric string, val interface{}) { - m.gm.Lock() - defer m.gm.Unlock() - m.gauges[metric] = m.gaugeValString(val) -} - -// RemoveGauge removes a gauge -func (m *CirconusMetrics) RemoveGauge(metric string) { - m.gm.Lock() - defer m.gm.Unlock() - delete(m.gauges, metric) -} - -// SetGaugeFunc sets a gauge to a function [called at flush interval] -func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) { - m.gfm.Lock() - defer m.gfm.Unlock() - m.gaugeFuncs[metric] = fn -} - -// RemoveGaugeFunc removes a gauge function -func (m *CirconusMetrics) RemoveGaugeFunc(metric string) { - m.gfm.Lock() - defer m.gfm.Unlock() - delete(m.gaugeFuncs, metric) -} - -// gaugeValString converts an interface value (of a supported type) to a string -func (m *CirconusMetrics) gaugeValString(val interface{}) string { - vs := "" - switch v := val.(type) { - default: - // ignore it, unsupported type - case int: - vs = fmt.Sprintf("%d", v) - case int8: - vs = fmt.Sprintf("%d", v) - case int16: - vs = fmt.Sprintf("%d", v) - case int32: - vs = fmt.Sprintf("%d", v) - case int64: - vs = fmt.Sprintf("%d", v) - case uint: - vs = fmt.Sprintf("%d", v) - case uint8: - vs = fmt.Sprintf("%d", v) - case uint16: - vs = fmt.Sprintf("%d", v) - case uint32: - vs = fmt.Sprintf("%d", v) - case uint64: - vs = fmt.Sprintf("%d", v) - case float32: - vs = fmt.Sprintf("%f", v) - case float64: - vs = fmt.Sprintf("%f", v) - } - return vs -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go deleted file mode 100644 index 59d19e20f..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package circonusgometrics - -import ( - "sync" - - "github.com/circonus-labs/circonusllhist" -) - -// Histogram measures the distribution of a stream of values. -type Histogram struct { - name string - hist *circonusllhist.Histogram - rw sync.RWMutex -} - -// Timing adds a value to a histogram -func (m *CirconusMetrics) Timing(metric string, val float64) { - m.SetHistogramValue(metric, val) -} - -// RecordValue adds a value to a histogram -func (m *CirconusMetrics) RecordValue(metric string, val float64) { - m.SetHistogramValue(metric, val) -} - -// SetHistogramValue adds a value to a histogram -func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) { - m.NewHistogram(metric) - - m.histograms[metric].rw.Lock() - defer m.histograms[metric].rw.Unlock() - - m.histograms[metric].hist.RecordValue(val) -} - -// RemoveHistogram removes a histogram -func (m *CirconusMetrics) RemoveHistogram(metric string) { - m.hm.Lock() - defer m.hm.Unlock() - delete(m.histograms, metric) -} - -// NewHistogram returns a histogram instance. 
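Timing and RecordValue above both feed SetHistogramValue, which lazily creates the histogram via NewHistogram. A small timing sketch; the helper itself is hypothetical:

```go
package metricsutil

import (
	"time"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

// TimeWork records the wall-clock duration of fn, in seconds, into the
// named histogram.
func TimeWork(m *cgm.CirconusMetrics, metric string, fn func()) {
	start := time.Now()
	fn()
	m.Timing(metric, time.Since(start).Seconds())
}
```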
-func (m *CirconusMetrics) NewHistogram(metric string) *Histogram {
-	m.hm.Lock()
-	defer m.hm.Unlock()
-
-	if hist, ok := m.histograms[metric]; ok {
-		return hist
-	}
-
-	hist := &Histogram{
-		name: metric,
-		hist: circonusllhist.New(),
-	}
-
-	m.histograms[metric] = hist
-
-	return hist
-}
-
-// Name returns the name from a histogram instance
-func (h *Histogram) Name() string {
-	return h.name
-}
-
-// RecordValue records the given value to a histogram instance
-func (h *Histogram) RecordValue(v float64) {
-	h.rw.Lock()
-	defer h.rw.Unlock()
-
-	h.hist.RecordValue(v)
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go
deleted file mode 100644
index abeb73006..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package circonusgometrics
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"strconv"
-	"time"
-
-	"github.com/circonus-labs/circonus-gometrics/api"
-	"github.com/hashicorp/go-retryablehttp"
-)
-
-func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) {
-	if len(newMetrics) > 0 {
-		m.check.AddNewMetrics(newMetrics)
-	}
-
-	str, err := json.Marshal(output)
-	if err != nil {
-		m.Log.Printf("[ERROR] marshaling output %+v", err)
-		return
-	}
-
-	numStats, err := m.trapCall(str)
-	if err != nil {
-		m.Log.Printf("[ERROR] %+v\n", err)
-		return
-	}
-
-	if m.Debug {
-		m.Log.Printf("[DEBUG] %d stats sent\n", numStats)
-	}
-}
-
-func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
-	trap, err := m.check.GetTrap()
-	if err != nil {
-		return 0, err
-	}
-
-	dataReader := bytes.NewReader(payload)
-
-	req, err := retryablehttp.NewRequest("PUT", trap.URL.String(), dataReader)
-	if err != nil {
-		return 0, err
-	}
-	req.Header.Add("Accept", "application/json")
-
-	client := retryablehttp.NewClient()
-	if trap.URL.Scheme == "https" {
-		client.HTTPClient.Transport = &http.Transport{
-			Proxy: http.ProxyFromEnvironment,
-			Dial: (&net.Dialer{
-				Timeout:   30 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			TLSHandshakeTimeout: 10 * time.Second,
-			TLSClientConfig:     trap.TLS,
-			DisableKeepAlives:   true,
-			MaxIdleConnsPerHost: -1,
-			DisableCompression:  true,
-		}
-	} else {
-		client.HTTPClient.Transport = &http.Transport{
-			Proxy: http.ProxyFromEnvironment,
-			Dial: (&net.Dialer{
-				Timeout:   30 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			TLSHandshakeTimeout: 10 * time.Second,
-			DisableKeepAlives:   true,
-			MaxIdleConnsPerHost: -1,
-			DisableCompression:  true,
-		}
-	}
-	client.RetryWaitMin = 10 * time.Millisecond
-	client.RetryWaitMax = 50 * time.Millisecond
-	client.RetryMax = 3
-	client.Logger = m.Log
-
-	attempts := -1
-	client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
-		attempts = retryNumber
-	}
-
-	resp, err := client.Do(req)
-	if err != nil {
-		if attempts == client.RetryMax {
-			m.check.RefreshTrap()
-		}
-		return 0, err
-	}
-
-	defer resp.Body.Close()
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err)
-	}
-
-	var response map[string]interface{}
-	err = json.Unmarshal(body, &response)
-	if err != nil {
-		m.Log.Printf("[ERROR] parsing body, proceeding. %s\n", err)
-	}
-
-	if resp.StatusCode != 200 {
-		return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode))
-	}
-	switch v := response["stats"].(type) {
-	case float64:
-		return int(v), nil
-	case int:
-		return v, nil
-	default:
-	}
-	return 0, errors.New("[ERROR] bad response type")
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/text.go b/vendor/github.com/circonus-labs/circonus-gometrics/text.go
deleted file mode 100644
index eb2d12a87..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/text.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package circonusgometrics
-
-// A Text metric is an arbitrary string
-//
-
-// SetText sets a text metric
-func (m *CirconusMetrics) SetText(metric string, val string) {
-	m.SetTextValue(metric, val)
-}
-
-// SetTextValue sets a text metric
-func (m *CirconusMetrics) SetTextValue(metric string, val string) {
-	m.tm.Lock()
-	defer m.tm.Unlock()
-	m.text[metric] = val
-}
-
-// RemoveText removes a text metric
-func (m *CirconusMetrics) RemoveText(metric string) {
-	m.tm.Lock()
-	defer m.tm.Unlock()
-	delete(m.text, metric)
-}
-
-// SetTextFunc sets a text metric to a function [called at flush interval]
-func (m *CirconusMetrics) SetTextFunc(metric string, fn func() string) {
-	m.tfm.Lock()
-	defer m.tfm.Unlock()
-	m.textFuncs[metric] = fn
-}
-
-// RemoveTextFunc removes a text metric function
-func (m *CirconusMetrics) RemoveTextFunc(metric string) {
-	m.tfm.Lock()
-	defer m.tfm.Unlock()
-	delete(m.textFuncs, metric)
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go
deleted file mode 100644
index 73259a7b1..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package circonusgometrics
-
-import (
-	"net/http"
-	"time"
-)
-
-// TrackHTTPLatency wraps Handler functions registered with an http.ServerMux tracking latencies.
-// Metrics are of the form go`HTTP`<method>`<name>`latency and are tracked in a histogram in units
-// of seconds (as a float64) providing nanosecond granularity.
-func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
-	return func(rw http.ResponseWriter, req *http.Request) {
-		start := time.Now().UnixNano()
-		handler(rw, req)
-		elapsed := time.Now().UnixNano() - start
-		//hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency")
-		m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second))
-	}
-}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go
deleted file mode 100644
index d5cc7d56c..000000000
--- a/vendor/github.com/circonus-labs/circonus-gometrics/util.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 Circonus, Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package circonusgometrics
-
-import (
-	"github.com/circonus-labs/circonusllhist"
-)
-
-// Reset removes all existing metrics: counters, gauges, histograms and text. 
-func (m *CirconusMetrics) Reset() { - m.cm.Lock() - defer m.cm.Unlock() - - m.cfm.Lock() - defer m.cfm.Unlock() - - m.gm.Lock() - defer m.gm.Unlock() - - m.gfm.Lock() - defer m.gfm.Unlock() - - m.hm.Lock() - defer m.hm.Unlock() - - m.tm.Lock() - defer m.tm.Unlock() - - m.tfm.Lock() - defer m.tfm.Unlock() - - m.counters = make(map[string]uint64) - m.counterFuncs = make(map[string]func() uint64) - m.gauges = make(map[string]string) - m.gaugeFuncs = make(map[string]func() int64) - m.histograms = make(map[string]*Histogram) - m.text = make(map[string]string) - m.textFuncs = make(map[string]func() string) -} - -// snapshot returns a copy of the values of all registered counters and gauges. -func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { - m.cm.Lock() - defer m.cm.Unlock() - - m.cfm.Lock() - defer m.cfm.Unlock() - - m.gm.Lock() - defer m.gm.Unlock() - - m.gfm.Lock() - defer m.gfm.Unlock() - - m.hm.Lock() - defer m.hm.Unlock() - - m.tm.Lock() - defer m.tm.Unlock() - - m.tfm.Lock() - defer m.tfm.Unlock() - - c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) - for n, v := range m.counters { - c[n] = v - } - - for n, f := range m.counterFuncs { - c[n] = f() - } - - //g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs)) - g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) - for n, v := range m.gauges { - g[n] = v - } - - for n, f := range m.gaugeFuncs { - g[n] = m.gaugeValString(f()) - } - - h = make(map[string]*circonusllhist.Histogram, len(m.histograms)) - for n, hist := range m.histograms { - h[n] = hist.hist.CopyAndReset() - } - - t = make(map[string]string, len(m.text)+len(m.textFuncs)) - for n, v := range m.text { - t[n] = v - } - - for n, f := range m.textFuncs { - t[n] = f() - } - - return -} diff --git a/vendor/github.com/circonus-labs/circonusllhist/LICENSE b/vendor/github.com/circonus-labs/circonusllhist/LICENSE deleted file mode 100644 index dc014a4ac..000000000 --- a/vendor/github.com/circonus-labs/circonusllhist/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2016 Circonus, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - * Neither the name Circonus, Inc. nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go b/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go deleted file mode 100644 index cf4f482c1..000000000 --- a/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2016, Circonus, Inc. All rights reserved. -// See the LICENSE file. - -// Package circllhist provides an implementation of Circonus' fixed log-linear -// histogram data structure. This allows tracking of histograms in a -// composable way such that accurate error can be reasoned about. -package circonusllhist - -import ( - "bytes" - "errors" - "fmt" - "math" - "sync" -) - -const ( - DEFAULT_HIST_SIZE = int16(100) -) - -var power_of_ten = [...]float64{ - 1, 10, 100, 1000, 10000, 100000, 1e+06, 1e+07, 1e+08, 1e+09, 1e+10, - 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, - 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, - 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, - 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, - 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, - 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, - 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, - 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, - 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, - 1e+101, 1e+102, 1e+103, 1e+104, 1e+105, 1e+106, 1e+107, 1e+108, 1e+109, - 1e+110, 1e+111, 1e+112, 1e+113, 1e+114, 1e+115, 1e+116, 1e+117, 1e+118, - 1e+119, 1e+120, 1e+121, 1e+122, 1e+123, 1e+124, 1e+125, 1e+126, 1e+127, - 1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120, - 1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111, - 1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, 1e-102, - 1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, - 1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, - 1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, - 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66, - 1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, - 1e-55, 1e-54, 1e-53, 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, - 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, - 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26, - 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17, 1e-16, - 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-09, 1e-08, 1e-07, 1e-06, - 1e-05, 0.0001, 0.001, 0.01, 0.1, -} - -// A Bracket is a part of a cumulative distribution. 
-type Bin struct {
-	val   int8
-	exp   int8
-	count uint64
-}
-
-func NewBinRaw(val int8, exp int8, count uint64) *Bin {
-	return &Bin{
-		val:   val,
-		exp:   exp,
-		count: count,
-	}
-}
-func NewBin() *Bin {
-	return NewBinRaw(0, 0, 0)
-}
-func NewBinFromFloat64(d float64) *Bin {
-	hb := NewBinRaw(0, 0, 0)
-	hb.SetFromFloat64(d)
-	return hb
-}
-func (hb *Bin) SetFromFloat64(d float64) *Bin {
-	hb.val = -1
-	if math.IsInf(d, 0) || math.IsNaN(d) {
-		return hb
-	}
-	if d == 0.0 {
-		hb.val = 0
-		return hb
-	}
-	sign := 1
-	if math.Signbit(d) {
-		sign = -1
-	}
-	d = math.Abs(d)
-	big_exp := int(math.Floor(math.Log10(d)))
-	hb.exp = int8(big_exp)
-	if int(hb.exp) != big_exp { //rolled
-		hb.exp = 0
-		if big_exp < 0 {
-			hb.val = 0
-		}
-		return hb
-	}
-	d = d / hb.PowerOfTen()
-	d = d * 10
-	hb.val = int8(sign * int(math.Floor(d+1e-13)))
-	if hb.val == 100 || hb.val == -100 {
-		if hb.exp < 127 {
-			hb.val = hb.val / 10
-			hb.exp++
-		} else {
-			hb.val = 0
-			hb.exp = 0
-		}
-	}
-	if hb.val == 0 {
-		hb.exp = 0
-		return hb
-	}
-	if !((hb.val >= 10 && hb.val < 100) ||
-		(hb.val <= -10 && hb.val > -100)) {
-		hb.val = -1
-		hb.exp = 0
-	}
-	return hb
-}
-func (hb *Bin) PowerOfTen() float64 {
-	idx := int(hb.exp)
-	if idx < 0 {
-		idx = 256 + idx
-	}
-	return power_of_ten[idx]
-}
-
-func (hb *Bin) IsNaN() bool {
-	if hb.val > 99 || hb.val < -99 {
-		return true
-	}
-	return false
-}
-func (hb *Bin) Val() int8 {
-	return hb.val
-}
-func (hb *Bin) Exp() int8 {
-	return hb.exp
-}
-func (hb *Bin) Count() uint64 {
-	return hb.count
-}
-func (hb *Bin) Value() float64 {
-	if hb.IsNaN() {
-		return math.NaN()
-	}
-	if hb.val < 10 && hb.val > -10 {
-		return 0.0
-	}
-	return (float64(hb.val) / 10.0) * hb.PowerOfTen()
-}
-func (hb *Bin) BinWidth() float64 {
-	if hb.IsNaN() {
-		return math.NaN()
-	}
-	if hb.val < 10 && hb.val > -10 {
-		return 0.0
-	}
-	return hb.PowerOfTen() / 10.0
-}
-func (hb *Bin) Midpoint() float64 {
-	if hb.IsNaN() {
-		return math.NaN()
-	}
-	out := hb.Value()
-	if out == 0 {
-		return 0
-	}
-	interval := hb.BinWidth()
-	if out < 0 {
-		interval = interval * -1
-	}
-	return out + interval/2.0
-}
-func (hb *Bin) Left() float64 {
-	if hb.IsNaN() {
-		return math.NaN()
-	}
-	out := hb.Value()
-	if out >= 0 {
-		return out
-	}
-	return out - hb.BinWidth()
-}
-
-func (h1 *Bin) Compare(h2 *Bin) int {
-	if h1.val == h2.val && h1.exp == h2.exp {
-		return 0
-	}
-	if h1.val == -1 {
-		return 1
-	}
-	if h2.val == -1 {
-		return -1
-	}
-	if h1.val == 0 {
-		if h2.val > 0 {
-			return 1
-		}
-		return -1
-	}
-	if h2.val == 0 {
-		if h1.val < 0 {
-			return 1
-		}
-		return -1
-	}
-	if h1.val < 0 && h2.val > 0 {
-		return 1
-	}
-	if h1.val > 0 && h2.val < 0 {
-		return -1
-	}
-	if h1.exp == h2.exp {
-		if h1.val < h2.val {
-			return 1
-		}
-		return -1
-	}
-	if h1.exp > h2.exp {
-		if h1.val < 0 {
-			return 1
-		}
-		return -1
-	}
-	if h1.exp < h2.exp {
-		if h1.val < 0 {
-			return -1
-		}
-		return 1
-	}
-	return 0
-}
-
-// This histogram structure tracks values to two decimal digits of precision
-// with a bounded error that remains bounded upon composition
-type Histogram struct {
-	mutex  sync.Mutex
-	bvs    []Bin
-	used   int16
-	allocd int16
-}
-
-// New returns a new Histogram
-func New() *Histogram {
-	return &Histogram{
-		allocd: DEFAULT_HIST_SIZE,
-		used:   0,
-		bvs:    make([]Bin, DEFAULT_HIST_SIZE),
-	}
-}
-
-// Max returns the approximate maximum recorded value.
-func (h *Histogram) Max() float64 {
-	return h.ValueAtQuantile(1.0)
-}
-
-// Min returns the approximate minimum recorded value. 
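To make the log-linear bin mapping concrete, a small sketch using the constructors above (import path taken from this vendor directory):

```go
package main

import (
	"fmt"

	"github.com/circonus-labs/circonusllhist"
)

func main() {
	hb := circonusllhist.NewBinFromFloat64(0.0123)
	// val=12, exp=-2: the bin covers [0.012, 0.013), i.e. the value is
	// kept to two significant decimal digits at any magnitude.
	fmt.Println(hb.Value(), hb.BinWidth(), hb.Midpoint()) // 0.012 0.001 0.0125
}
```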
-func (h *Histogram) Min() float64 { - return h.ValueAtQuantile(0.0) -} - -// Mean returns the approximate arithmetic mean of the recorded values. -func (h *Histogram) Mean() float64 { - return h.ApproxMean() -} - -// Reset forgets all bins in the histogram (they remain allocated) -func (h *Histogram) Reset() { - h.mutex.Lock() - h.used = 0 - h.mutex.Unlock() -} - -// RecordValue records the given value, returning an error if the value is out -// of range. -func (h *Histogram) RecordValue(v float64) error { - return h.RecordValues(v, 1) -} - -// RecordCorrectedValue records the given value, correcting for stalls in the -// recording process. This only works for processes which are recording values -// at an expected interval (e.g., doing jitter analysis). Processes which are -// recording ad-hoc values (e.g., latency for incoming requests) can't take -// advantage of this. -// CH Compat -func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { - if err := h.RecordValue(float64(v)); err != nil { - return err - } - - if expectedInterval <= 0 || v <= expectedInterval { - return nil - } - - missingValue := v - expectedInterval - for missingValue >= expectedInterval { - if err := h.RecordValue(float64(missingValue)); err != nil { - return err - } - missingValue -= expectedInterval - } - - return nil -} - -// find where a new bin should go -func (h *Histogram) InternalFind(hb *Bin) (bool, int16) { - if h.used == 0 { - return false, 0 - } - rv := -1 - idx := int16(0) - l := int16(0) - r := h.used - 1 - for l < r { - check := (r + l) / 2 - rv = h.bvs[check].Compare(hb) - if rv == 0 { - l = check - r = check - } else if rv > 0 { - l = check + 1 - } else { - r = check - 1 - } - } - if rv != 0 { - rv = h.bvs[l].Compare(hb) - } - idx = l - if rv == 0 { - return true, idx - } - if rv < 0 { - return false, idx - } - idx++ - return false, idx -} - -func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 { - h.mutex.Lock() - defer h.mutex.Unlock() - if count == 0 { - return 0 - } - found, idx := h.InternalFind(hb) - if !found { - if h.used == h.allocd { - new_bvs := make([]Bin, h.allocd+DEFAULT_HIST_SIZE) - if idx > 0 { - copy(new_bvs[0:], h.bvs[0:idx]) - } - if idx < h.used { - copy(new_bvs[idx+1:], h.bvs[idx:]) - } - h.allocd = h.allocd + DEFAULT_HIST_SIZE - h.bvs = new_bvs - } else { - copy(h.bvs[idx+1:], h.bvs[idx:h.used]) - } - h.bvs[idx].val = hb.val - h.bvs[idx].exp = hb.exp - h.bvs[idx].count = uint64(count) - h.used++ - return h.bvs[idx].count - } - var newval uint64 - if count < 0 { - newval = h.bvs[idx].count - uint64(-count) - } else { - newval = h.bvs[idx].count + uint64(count) - } - if newval < h.bvs[idx].count { //rolled - newval = ^uint64(0) - } - h.bvs[idx].count = newval - return newval - h.bvs[idx].count -} - -// RecordValues records n occurrences of the given value, returning an error if -// the value is out of range. 
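A sketch of the stall correction described above: with an expected interval of 1000 (in the same unit as the value), one late observation of 3500 back-fills the gap:

```go
package main

import "github.com/circonus-labs/circonusllhist"

func main() {
	h := circonusllhist.New()
	// Records 3500, then synthetic samples 2500 and 1500, stopping once
	// the remainder drops below the expected interval.
	if err := h.RecordCorrectedValue(3500, 1000); err != nil {
		panic(err)
	}
}
```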
-func (h *Histogram) RecordValues(v float64, n int64) error { - var hb Bin - hb.SetFromFloat64(v) - h.InsertBin(&hb, n) - return nil -} - -// Approximate mean -func (h *Histogram) ApproxMean() float64 { - h.mutex.Lock() - defer h.mutex.Unlock() - divisor := 0.0 - sum := 0.0 - for i := int16(0); i < h.used; i++ { - midpoint := h.bvs[i].Midpoint() - cardinality := float64(h.bvs[i].count) - divisor += cardinality - sum += midpoint * cardinality - } - if divisor == 0.0 { - return math.NaN() - } - return sum / divisor -} - -// Approximate sum -func (h *Histogram) ApproxSum() float64 { - h.mutex.Lock() - defer h.mutex.Unlock() - sum := 0.0 - for i := int16(0); i < h.used; i++ { - midpoint := h.bvs[i].Midpoint() - cardinality := float64(h.bvs[i].count) - sum += midpoint * cardinality - } - return sum -} - -func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { - h.mutex.Lock() - defer h.mutex.Unlock() - q_out := make([]float64, len(q_in)) - i_q, i_b := 0, int16(0) - total_cnt, bin_width, bin_left, lower_cnt, upper_cnt := 0.0, 0.0, 0.0, 0.0, 0.0 - if len(q_in) == 0 { - return q_out, nil - } - // Make sure the requested quantiles are in order - for i_q = 1; i_q < len(q_in); i_q++ { - if q_in[i_q-1] > q_in[i_q] { - return nil, errors.New("out of order") - } - } - // Add up the bins - for i_b = 0; i_b < h.used; i_b++ { - if !h.bvs[i_b].IsNaN() { - total_cnt += float64(h.bvs[i_b].count) - } - } - if total_cnt == 0.0 { - return nil, errors.New("empty_histogram") - } - - for i_q = 0; i_q < len(q_in); i_q++ { - if q_in[i_q] < 0.0 || q_in[i_q] > 1.0 { - return nil, errors.New("out of bound quantile") - } - q_out[i_q] = total_cnt * q_in[i_q] - } - - for i_b = 0; i_b < h.used; i_b++ { - if h.bvs[i_b].IsNaN() { - continue - } - bin_width = h.bvs[i_b].BinWidth() - bin_left = h.bvs[i_b].Left() - lower_cnt = upper_cnt - upper_cnt = lower_cnt + float64(h.bvs[i_b].count) - break - } - for i_q = 0; i_q < len(q_in); i_q++ { - for i_b < (h.used-1) && upper_cnt < q_out[i_q] { - i_b++ - bin_width = h.bvs[i_b].BinWidth() - bin_left = h.bvs[i_b].Left() - lower_cnt = upper_cnt - upper_cnt = lower_cnt + float64(h.bvs[i_b].count) - } - if lower_cnt == q_out[i_q] { - q_out[i_q] = bin_left - } else if upper_cnt == q_out[i_q] { - q_out[i_q] = bin_left + bin_width - } else { - if bin_width == 0 { - q_out[i_q] = bin_left - } else { - q_out[i_q] = bin_left + (q_out[i_q]-lower_cnt)/(upper_cnt-lower_cnt)*bin_width - } - } - } - return q_out, nil -} - -// ValueAtQuantile returns the recorded value at the given quantile (0..1). -func (h *Histogram) ValueAtQuantile(q float64) float64 { - h.mutex.Lock() - defer h.mutex.Unlock() - q_in := make([]float64, 1) - q_in[0] = q - q_out, err := h.ApproxQuantile(q_in) - if err == nil && len(q_out) == 1 { - return q_out[0] - } - return math.NaN() -} - -// SignificantFigures returns the significant figures used to create the -// histogram -// CH Compat -func (h *Histogram) SignificantFigures() int64 { - return 2 -} - -// Equals returns true if the two Histograms are equivalent, false if not. 
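A quantile-query sketch built from RecordValue and the accessors above:

```go
package main

import (
	"fmt"

	"github.com/circonus-labs/circonusllhist"
)

func main() {
	h := circonusllhist.New()
	for _, v := range []float64{0.8, 1.2, 1.9, 4.5} {
		h.RecordValue(v) // always returns nil in this implementation
	}
	fmt.Println(h.ApproxMean())          // bin-midpoint weighted mean
	fmt.Println(h.ValueAtQuantile(0.99)) // approximate p99 from the bins
}
```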
-func (h *Histogram) Equals(other *Histogram) bool { - h.mutex.Lock() - other.mutex.Lock() - defer h.mutex.Unlock() - defer other.mutex.Unlock() - switch { - case - h.used != other.used: - return false - default: - for i := int16(0); i < h.used; i++ { - if h.bvs[i].Compare(&other.bvs[i]) != 0 { - return false - } - if h.bvs[i].count != other.bvs[i].count { - return false - } - } - } - return true -} - -func (h *Histogram) CopyAndReset() *Histogram { - h.mutex.Lock() - defer h.mutex.Unlock() - newhist := &Histogram{ - allocd: h.allocd, - used: h.used, - bvs: h.bvs, - } - h.allocd = DEFAULT_HIST_SIZE - h.bvs = make([]Bin, DEFAULT_HIST_SIZE) - h.used = 0 - return newhist -} -func (h *Histogram) DecStrings() []string { - h.mutex.Lock() - defer h.mutex.Unlock() - out := make([]string, h.used) - for i, bin := range h.bvs[0:h.used] { - var buffer bytes.Buffer - buffer.WriteString("H[") - buffer.WriteString(fmt.Sprintf("%3.1e", bin.Value())) - buffer.WriteString("]=") - buffer.WriteString(fmt.Sprintf("%v", bin.count)) - out[i] = buffer.String() - } - return out -} diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS b/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS deleted file mode 100644 index b430b78ea..000000000 --- a/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -These people have provided bug fixes, new features or improved the documentation. - -* Daniel Garcia -* Peter Teichman -* Phillip Kovalev -* Richard Crowley -* Timothée Peignier -* Tomás Senart diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE b/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE deleted file mode 100644 index fdfc8abff..000000000 --- a/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright 2015 Timothée Peignier. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/README.md b/vendor/github.com/cyberdelia/go-metrics-graphite/README.md deleted file mode 100644 index e43f4df82..000000000 --- a/vendor/github.com/cyberdelia/go-metrics-graphite/README.md +++ /dev/null @@ -1,19 +0,0 @@ -This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics) -library which will post the metrics to Graphite. 
It was originally part of the -`go-metrics` library itself, but has been split off to make maintenance of -both the core library and the client easier. - -### Usage - -```go -import "github.com/cyberdelia/go-metrics-graphite" - - -go graphite.Graphite(metrics.DefaultRegistry, - 1*time.Second, "some.prefix", addr) -``` - -### Migrating from `rcrowley/go-metrics` implementation - -Simply modify the import from `"github.com/rcrowley/go-metrics/librato"` to -`"github.com/cyberdelia/go-metrics-graphite"` and it should Just Work. diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go b/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go deleted file mode 100644 index ffc309b81..000000000 --- a/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go +++ /dev/null @@ -1,113 +0,0 @@ -package graphite - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" - - "github.com/rcrowley/go-metrics" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry metrics.Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r metrics.Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -// GraphiteOnce performs a single submission to Graphite, returning a -// non-nil error on failed connections. This can be used in a loop -// similar to GraphiteWithConfig for custom error handling. 
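A sketch of such a loop, assuming a local Graphite listener on the conventional plaintext port 2003:

```go
package main

import (
	"log"
	"net"
	"time"

	graphite "github.com/cyberdelia/go-metrics-graphite"
	"github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	cfg := graphite.GraphiteConfig{
		Addr:          addr,
		Registry:      metrics.DefaultRegistry,
		FlushInterval: 10 * time.Second,
		DurationUnit:  time.Millisecond,
		Prefix:        "some.prefix",
		Percentiles:   []float64{0.5, 0.95, 0.99},
	}
	for {
		// Unlike GraphiteWithConfig, a failed dial surfaces here and can
		// be handled explicitly rather than only logged.
		if err := graphite.GraphiteOnce(cfg); err != nil {
			log.Printf("graphite flush failed: %v", err)
		}
		time.Sleep(cfg.FlushInterval)
	}
}
```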
-func GraphiteOnce(c GraphiteConfig) error { - return graphite(&c) -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case metrics.Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case metrics.Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case metrics.GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case metrics.Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case metrics.Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case metrics.Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx]/du, now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE deleted file mode 100644 index 34504e4b3..000000000 --- a/vendor/github.com/fatih/structs/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the 
following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md deleted file mode 100644 index 44e01006e..000000000 --- a/vendor/github.com/fatih/structs/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) - -Structs contains various utilities to work with Go (Golang) structs. It was -initially used by me to convert a struct into a `map[string]interface{}`. With -time I've added other utilities for structs. It's basically a high level -package based on primitives from the reflect package. Feel free to add new -functions or improve the existing code. - -## Install - -```bash -go get github.com/fatih/structs -``` - -## Usage and Examples - -Just like the standard lib `strings`, `bytes` and co packages, `structs` has -many global functions to manipulate or organize your struct data. Lets define -and declare a struct: - -```go -type Server struct { - Name string `json:"name,omitempty"` - ID int - Enabled bool - users []string // not exported - http.Server // embedded -} - -server := &Server{ - Name: "gopher", - ID: 123456, - Enabled: true, -} -``` - -```go -// Convert a struct to a map[string]interface{} -// => {"Name":"gopher", "ID":123456, "Enabled":true} -m := structs.Map(server) - -// Convert the values of a struct to a []interface{} -// => ["gopher", 123456, true] -v := structs.Values(server) - -// Convert the names of a struct to a []string -// (see "Names methods" for more info about fields) -n := structs.Names(server) - -// Convert the values of a struct to a []*Field -// (see "Field methods" for more info about fields) -f := structs.Fields(server) - -// Return the struct name => "Server" -n := structs.Name(server) - -// Check if any field of a struct is initialized or not. -h := structs.HasZero(server) - -// Check if all fields of a struct is initialized or not. -z := structs.IsZero(server) - -// Check if server is a struct or a pointer to struct -i := structs.IsStruct(server) -``` - -### Struct methods - -The structs functions can be also used as independent methods by creating a new -`*structs.Struct`. This is handy if you want to have more control over the -structs (such as retrieving a single Field). 
- -```go -// Create a new struct type: -s := structs.New(server) - -m := s.Map() // Get a map[string]interface{} -v := s.Values() // Get a []interface{} -f := s.Fields() // Get a []*Field -n := s.Names() // Get a []string -f := s.Field(name) // Get a *Field based on the given field name -f, ok := s.FieldOk(name) // Get a *Field based on the given field name -n := s.Name() // Get the struct name -h := s.HasZero() // Check if any field is initialized -z := s.IsZero() // Check if all fields are initialized -``` - -### Field methods - -We can easily examine a single Field for more detail. Below you can see how we -get and interact with various field methods: - - -```go -s := structs.New(server) - -// Get the Field struct for the "Name" field -name := s.Field("Name") - -// Get the underlying value, value => "gopher" -value := name.Value().(string) - -// Set the field's value -name.Set("another gopher") - -// Get the field's kind, kind => "string" -name.Kind() - -// Check if the field is exported or not -if name.IsExported() { - fmt.Println("Name field is exported") -} - -// Check if the value is a zero value, such as "" for string, 0 for int -if !name.IsZero() { - fmt.Println("Name is initialized") -} - -// Check if the field is an anonymous (embedded) field -if !name.IsEmbedded() { - fmt.Println("Name is not an embedded field") -} - -// Get the Field's tag value for tag name "json", tag value => "name,omitempty" -tagValue := name.Tag("json") -``` - -Nested structs are supported too: - -```go -addrField := s.Field("Server").Field("Addr") - -// Get the value for addr -a := addrField.Value().(string) - -// Or get all fields -httpServer := s.Field("Server").Fields() -``` - -We can also get a slice of Fields from the Struct type to iterate over all -fields. This is handy if you wish to examine all fields: - -```go -s := structs.New(server) - -for _, f := range s.Fields() { - fmt.Printf("field name: %+v\n", f.Name()) - - if f.IsExported() { - fmt.Printf("value : %+v\n", f.Value()) - fmt.Printf("is zero : %+v\n", f.IsZero()) - } -} -``` - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * [Cihangir Savas](https://github.com/cihangir) - -## License - -The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go deleted file mode 100644 index e69783230..000000000 --- a/vendor/github.com/fatih/structs/field.go +++ /dev/null @@ -1,141 +0,0 @@ -package structs - -import ( - "errors" - "fmt" - "reflect" -) - -var ( - errNotExported = errors.New("field is not exported") - errNotSettable = errors.New("field is not settable") -) - -// Field represents a single struct field that encapsulates high level -// functions around the field. -type Field struct { - value reflect.Value - field reflect.StructField - defaultTag string -} - -// Tag returns the value associated with key in the tag string. If there is no -// such key in the tag, Tag returns the empty string. -func (f *Field) Tag(key string) string { - return f.field.Tag.Get(key) -} - -// Value returns the underlying value of the field. It panics if the field -// is not exported. -func (f *Field) Value() interface{} { - return f.value.Interface() -} - -// IsEmbedded returns true if the given field is an anonymous field (embedded) -func (f *Field) IsEmbedded() bool { - return f.field.Anonymous -} - -// IsExported returns true if the given field is exported. 
-func (f *Field) IsExported() bool {
-	return f.field.PkgPath == ""
-}
-
-// IsZero returns true if the given field is not initialized (has a zero value).
-// It panics if the field is not exported.
-func (f *Field) IsZero() bool {
-	zero := reflect.Zero(f.value.Type()).Interface()
-	current := f.Value()
-
-	return reflect.DeepEqual(current, zero)
-}
-
-// Name returns the name of the given field
-func (f *Field) Name() string {
-	return f.field.Name
-}
-
-// Kind returns the field's kind, such as "string", "map", "bool", etc.
-func (f *Field) Kind() reflect.Kind {
-	return f.value.Kind()
-}
-
-// Set sets the field to given value v. It returns an error if the field is not
-// settable (not addressable or not exported) or if the given value's type
-// doesn't match the field's type.
-func (f *Field) Set(val interface{}) error {
-	// we can't set unexported fields, so be sure this field is exported
-	if !f.IsExported() {
-		return errNotExported
-	}
-
-	// do we get here? not sure...
-	if !f.value.CanSet() {
-		return errNotSettable
-	}
-
-	given := reflect.ValueOf(val)
-
-	if f.value.Kind() != given.Kind() {
-		return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
-	}
-
-	f.value.Set(given)
-	return nil
-}
-
-// Zero sets the field to its zero value. It returns an error if the field is not
-// settable (not addressable or not exported).
-func (f *Field) Zero() error {
-	zero := reflect.Zero(f.value.Type()).Interface()
-	return f.Set(zero)
-}
-
-// Fields returns a slice of Fields. This is particularly handy to get the fields
-// of a nested struct. A struct tag with the content of "-" ignores the
-// checking of that particular field. Example:
-//
-//   // Field is ignored by this package.
-//   Field *http.Request `structs:"-"`
-//
-// It panics if field is not exported or if field's kind is not struct
-func (f *Field) Fields() []*Field {
-	return getFields(f.value, f.defaultTag)
-}
-
-// Field returns the field from a nested struct. It panics if the nested struct
-// is not exported or if the field was not found.
-func (f *Field) Field(name string) *Field {
-	field, ok := f.FieldOk(name)
-	if !ok {
-		panic("field not found")
-	}
-
-	return field
-}
-
-// FieldOk returns the field from a nested struct. The boolean returns whether
-// the field was found (true) or not (false).
-func (f *Field) FieldOk(name string) (*Field, bool) {
-	value := &f.value
-	// value must be settable so we need to make sure it holds the address of the
-	// variable and not a copy, so we can pass the pointer to strctVal instead of a
-	// copy (which is not assigned to any variable, hence not settable).
-	// see "https://blog.golang.org/laws-of-reflection#TOC_8."
-	if f.value.Kind() != reflect.Ptr {
-		a := f.value.Addr()
-		value = &a
-	}
-	v := strctVal(value.Interface())
-	t := v.Type()
-
-	field, ok := t.FieldByName(name)
-	if !ok {
-		return nil, false
-	}
-
-	return &Field{
-		field: field,
-		value: v.FieldByName(name),
-	}, true
-}
diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go
deleted file mode 100644
index 39eb083d3..000000000
--- a/vendor/github.com/fatih/structs/structs.go
+++ /dev/null
@@ -1,507 +0,0 @@
-// Package structs contains various utility functions to work with structs.
-package structs

-import (
-	"fmt"

-	"reflect"
-)

-var (
-	// DefaultTagName is the default tag name for struct fields which provides
-	// a more granular way to tweak certain structs. Lookup the necessary functions
-	// for more info.
- DefaultTagName = "structs" // struct's field default tag name -) - -// Struct encapsulates a struct type to provide several high level functions -// around the struct. -type Struct struct { - raw interface{} - value reflect.Value - TagName string -} - -// New returns a new *Struct with the struct s. It panics if the s's kind is -// not struct. -func New(s interface{}) *Struct { - return &Struct{ - raw: s, - value: strctVal(s), - TagName: DefaultTagName, - } -} - -// Map converts the given struct to a map[string]interface{}, where the keys -// of the map are the field names and the values of the map the associated -// values of the fields. The default key string is the struct field name but -// can be changed in the struct field's tag value. The "structs" key in the -// struct's field tag value is the key name. Example: -// -// // Field appears in map as key "myName". -// Name string `structs:"myName"` -// -// A tag value with the content of "-" ignores that particular field. Example: -// -// // Field is ignored by this package. -// Field bool `structs:"-"` -// -// A tag value with the content of "string" uses the stringer to get the value. Example: -// -// // The value will be output of Animal's String() func. -// // Map will panic if Animal does not implement String(). -// Field *Animal `structs:"field,string"` -// -// A tag value with the option of "flatten" used in a struct field is to flatten its fields -// in the output map. Example: -// -// // The FieldStruct's fields will be flattened into the output map. -// FieldStruct time.Time `structs:"flatten"` -// -// A tag value with the option of "omitnested" stops iterating further if the type -// is a struct. Example: -// -// // Field is not processed further by this package. -// Field time.Time `structs:"myName,omitnested"` -// Field *http.Request `structs:",omitnested"` -// -// A tag value with the option of "omitempty" ignores that particular field if -// the field value is empty. Example: -// -// // Field appears in map as key "myName", but the field is -// // skipped if empty. -// Field string `structs:"myName,omitempty"` -// -// // Field appears in map as key "Field" (the default), but -// // the field is skipped if empty. -// Field string `structs:",omitempty"` -// -// Note that only exported fields of a struct can be accessed, non exported -// fields will be neglected. -func (s *Struct) Map() map[string]interface{} { - out := make(map[string]interface{}) - s.FillMap(out) - return out -} - -// FillMap is the same as Map. Instead of returning the output, it fills the -// given map. 
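The Map doc comment above enumerates the supported tag options. A small sketch of how the common ones combine; the Server type and its values are illustrative, not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// Illustrative type: `structs:"myName"` renames the key, ",omitempty" drops
// zero values, and the unexported field is skipped by the package entirely.
type Server struct {
	Name    string `structs:"myName"`
	ID      int    `structs:",omitempty"`
	enabled bool   // unexported, neglected by structs.Map
}

func main() {
	m := structs.Map(Server{Name: "gopher"})
	fmt.Println(m) // map[myName:gopher] (ID omitted because it is zero)
}
```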
-func (s *Struct) FillMap(out map[string]interface{}) { - if out == nil { - return - } - - fields := s.structFields() - - for _, field := range fields { - name := field.Name - val := s.value.FieldByName(name) - isSubStruct := false - var finalVal interface{} - - tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) - if tagName != "" { - name = tagName - } - - // if the value is a zero value and the field is marked as omitempty do - // not include - if tagOpts.Has("omitempty") { - zero := reflect.Zero(val.Type()).Interface() - current := val.Interface() - - if reflect.DeepEqual(current, zero) { - continue - } - } - - if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { - // look out for embedded structs, and convert them to a - // map[string]interface{} too - n := New(val.Interface()) - n.TagName = s.TagName - m := n.Map() - isSubStruct = true - if len(m) == 0 { - finalVal = val.Interface() - } else { - finalVal = m - } - } else { - finalVal = val.Interface() - } - - if tagOpts.Has("string") { - s, ok := val.Interface().(fmt.Stringer) - if ok { - out[name] = s.String() - } - continue - } - - if isSubStruct && (tagOpts.Has("flatten")) { - for k := range finalVal.(map[string]interface{}) { - out[k] = finalVal.(map[string]interface{})[k] - } - } else { - out[name] = finalVal - } - } -} - -// Values converts the given s struct's field values to a []interface{}. A -// struct tag with the content of "-" ignores the that particular field. -// Example: -// -// // Field is ignored by this package. -// Field int `structs:"-"` -// -// A value with the option of "omitnested" stops iterating further if the type -// is a struct. Example: -// -// // Fields is not processed further by this package. -// Field time.Time `structs:",omitnested"` -// Field *http.Request `structs:",omitnested"` -// -// A tag value with the option of "omitempty" ignores that particular field and -// is not added to the values if the field value is empty. Example: -// -// // Field is skipped if empty -// Field string `structs:",omitempty"` -// -// Note that only exported fields of a struct can be accessed, non exported -// fields will be neglected. -func (s *Struct) Values() []interface{} { - fields := s.structFields() - - var t []interface{} - - for _, field := range fields { - val := s.value.FieldByName(field.Name) - - _, tagOpts := parseTag(field.Tag.Get(s.TagName)) - - // if the value is a zero value and the field is marked as omitempty do - // not include - if tagOpts.Has("omitempty") { - zero := reflect.Zero(val.Type()).Interface() - current := val.Interface() - - if reflect.DeepEqual(current, zero) { - continue - } - } - - if tagOpts.Has("string") { - s, ok := val.Interface().(fmt.Stringer) - if ok { - t = append(t, s.String()) - } - continue - } - - if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { - // look out for embedded structs, and convert them to a - // []interface{} to be added to the final values slice - for _, embeddedVal := range Values(val.Interface()) { - t = append(t, embeddedVal) - } - } else { - t = append(t, val.Interface()) - } - } - - return t -} - -// Fields returns a slice of Fields. A struct tag with the content of "-" -// ignores the checking of that particular field. Example: -// -// // Field is ignored by this package. -// Field bool `structs:"-"` -// -// It panics if s's kind is not struct. -func (s *Struct) Fields() []*Field { - return getFields(s.value, s.TagName) -} - -// Names returns a slice of field names. 
A struct tag with the content of "-" -// ignores the checking of that particular field. Example: -// -// // Field is ignored by this package. -// Field bool `structs:"-"` -// -// It panics if s's kind is not struct. -func (s *Struct) Names() []string { - fields := getFields(s.value, s.TagName) - - names := make([]string, len(fields)) - - for i, field := range fields { - names[i] = field.Name() - } - - return names -} - -func getFields(v reflect.Value, tagName string) []*Field { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - t := v.Type() - - var fields []*Field - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - - if tag := field.Tag.Get(tagName); tag == "-" { - continue - } - - f := &Field{ - field: field, - value: v.FieldByName(field.Name), - } - - fields = append(fields, f) - - } - - return fields -} - -// Field returns a new Field struct that provides several high level functions -// around a single struct field entity. It panics if the field is not found. -func (s *Struct) Field(name string) *Field { - f, ok := s.FieldOk(name) - if !ok { - panic("field not found") - } - - return f -} - -// FieldOk returns a new Field struct that provides several high level functions -// around a single struct field entity. The boolean returns true if the field -// was found. -func (s *Struct) FieldOk(name string) (*Field, bool) { - t := s.value.Type() - - field, ok := t.FieldByName(name) - if !ok { - return nil, false - } - - return &Field{ - field: field, - value: s.value.FieldByName(name), - defaultTag: s.TagName, - }, true -} - -// IsZero returns true if all fields in a struct is a zero value (not -// initialized) A struct tag with the content of "-" ignores the checking of -// that particular field. Example: -// -// // Field is ignored by this package. -// Field bool `structs:"-"` -// -// A value with the option of "omitnested" stops iterating further if the type -// is a struct. Example: -// -// // Field is not processed further by this package. -// Field time.Time `structs:"myName,omitnested"` -// Field *http.Request `structs:",omitnested"` -// -// Note that only exported fields of a struct can be accessed, non exported -// fields will be neglected. It panics if s's kind is not struct. -func (s *Struct) IsZero() bool { - fields := s.structFields() - - for _, field := range fields { - val := s.value.FieldByName(field.Name) - - _, tagOpts := parseTag(field.Tag.Get(s.TagName)) - - if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { - ok := IsZero(val.Interface()) - if !ok { - return false - } - - continue - } - - // zero value of the given field, such as "" for string, 0 for int - zero := reflect.Zero(val.Type()).Interface() - - // current value of the given field - current := val.Interface() - - if !reflect.DeepEqual(current, zero) { - return false - } - } - - return true -} - -// HasZero returns true if a field in a struct is not initialized (zero value). -// A struct tag with the content of "-" ignores the checking of that particular -// field. Example: -// -// // Field is ignored by this package. -// Field bool `structs:"-"` -// -// A value with the option of "omitnested" stops iterating further if the type -// is a struct. Example: -// -// // Field is not processed further by this package. -// Field time.Time `structs:"myName,omitnested"` -// Field *http.Request `structs:",omitnested"` -// -// Note that only exported fields of a struct can be accessed, non exported -// fields will be neglected. It panics if s's kind is not struct. 
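IsZero and HasZero above read almost identically, so a quick illustration of the difference may help; the Config type and values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Config struct {
	Host string
	Port int
}

func main() {
	c := Config{Host: "localhost"} // Port keeps its zero value

	fmt.Println(structs.HasZero(c)) // true: at least one field (Port) is zero
	fmt.Println(structs.IsZero(c))  // false: not every field is zero, Host is set
}
```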
-func (s *Struct) HasZero() bool {
-	fields := s.structFields()

-	for _, field := range fields {
-		val := s.value.FieldByName(field.Name)

-		_, tagOpts := parseTag(field.Tag.Get(s.TagName))

-		if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
-			ok := HasZero(val.Interface())
-			if ok {
-				return true
-			}

-			continue
-		}

-		// zero value of the given field, such as "" for string, 0 for int
-		zero := reflect.Zero(val.Type()).Interface()

-		// current value of the given field
-		current := val.Interface()

-		if reflect.DeepEqual(current, zero) {
-			return true
-		}
-	}

-	return false
-}

-// Name returns the struct's type name within its package. For more info refer
-// to Name() function.
-func (s *Struct) Name() string {
-	return s.value.Type().Name()
-}

-// structFields returns the exported struct fields for a given s struct. This
-// is a convenient helper method to avoid duplicate code in some of the
-// functions.
-func (s *Struct) structFields() []reflect.StructField {
-	t := s.value.Type()

-	var f []reflect.StructField

-	for i := 0; i < t.NumField(); i++ {
-		field := t.Field(i)
-		// we can't access the value of unexported fields
-		if field.PkgPath != "" {
-			continue
-		}

-		// don't check if it's omitted
-		if tag := field.Tag.Get(s.TagName); tag == "-" {
-			continue
-		}

-		f = append(f, field)
-	}

-	return f
-}

-func strctVal(s interface{}) reflect.Value {
-	v := reflect.ValueOf(s)

-	// if pointer get the underlying element
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}

-	if v.Kind() != reflect.Struct {
-		panic("not struct")
-	}

-	return v
-}

-// Map converts the given struct to a map[string]interface{}. For more info
-// refer to Struct types Map() method. It panics if s's kind is not struct.
-func Map(s interface{}) map[string]interface{} {
-	return New(s).Map()
-}

-// FillMap is the same as Map. Instead of returning the output, it fills the
-// given map.
-func FillMap(s interface{}, out map[string]interface{}) {
-	New(s).FillMap(out)
-}

-// Values converts the given struct to a []interface{}. For more info refer to
-// Struct types Values() method. It panics if s's kind is not struct.
-func Values(s interface{}) []interface{} {
-	return New(s).Values()
-}

-// Fields returns a slice of *Field. For more info refer to Struct types
-// Fields() method. It panics if s's kind is not struct.
-func Fields(s interface{}) []*Field {
-	return New(s).Fields()
-}

-// Names returns a slice of field names. For more info refer to Struct types
-// Names() method. It panics if s's kind is not struct.
-func Names(s interface{}) []string {
-	return New(s).Names()
-}

-// IsZero returns true if all fields are equal to a zero value. For more info
-// refer to Struct types IsZero() method. It panics if s's kind is not struct.
-func IsZero(s interface{}) bool {
-	return New(s).IsZero()
-}

-// HasZero returns true if any field is equal to a zero value. For more info
-// refer to Struct types HasZero() method. It panics if s's kind is not struct.
-func HasZero(s interface{}) bool {
-	return New(s).HasZero()
-}

-// IsStruct returns true if the given variable is a struct or a pointer to
-// struct.
-func IsStruct(s interface{}) bool {
-	v := reflect.ValueOf(s)
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}

-	// uninitialized zero value of a struct
-	if v.Kind() == reflect.Invalid {
-		return false
-	}

-	return v.Kind() == reflect.Struct
-}

-// Name returns the struct's type name within its package. It returns an
-// empty string for unnamed types. It panics if s's kind is not struct.
-func Name(s interface{}) string {
-	return New(s).Name()
-}
diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go
deleted file mode 100644
index 8859341c1..000000000
--- a/vendor/github.com/fatih/structs/tags.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package structs

-import "strings"

-// tagOptions contains a slice of tag options
-type tagOptions []string

-// Has returns true if the given option is available in tagOptions
-func (t tagOptions) Has(opt string) bool {
-	for _, tagOpt := range t {
-		if tagOpt == opt {
-			return true
-		}
-	}

-	return false
-}

-// parseTag splits a struct field's tag into its name and a list of options
-// which comes after a name. A tag is in the form of: "name,option1,option2".
-// The name can be neglected.
-func parseTag(tag string) (string, tagOptions) {
-	// tag is one of the following:
-	// ""
-	// "name"
-	// "name,opt"
-	// "name,opt,opt2"
-	// ",opt"

-	res := strings.Split(tag, ",")
-	return res[0], res[1:]
-}
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE
deleted file mode 100644
index 9d4735cad..000000000
--- a/vendor/github.com/gobwas/glob/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)

-Copyright (c) 2016 Sergey Kamardin

-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:

-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.

-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh
deleted file mode 100755
index 804cf22e6..000000000
--- a/vendor/github.com/gobwas/glob/bench.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /bin/bash

-bench() {
-    filename="/tmp/$1-$2.bench"
-    if test -e "${filename}";
-    then
-        echo "Already exists ${filename}"
-    else
-        backup=`git rev-parse --abbrev-ref HEAD`
-        git checkout $1
-        echo -n "Creating ${filename}... "
-        go test ./...
-run=NONE -bench=$2 > "${filename}" -benchmem - echo "OK" - git checkout ${backup} - sleep 5 - fi -} - - -to=$1 -current=`git rev-parse --abbrev-ref HEAD` - -bench ${to} $2 -bench ${current} $2 - -benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go deleted file mode 100644 index 02e7de80a..000000000 --- a/vendor/github.com/gobwas/glob/compiler/compiler.go +++ /dev/null @@ -1,525 +0,0 @@ -package compiler - -// TODO use constructor with all matchers, and to their structs private -// TODO glue multiple Text nodes (like after QuoteMeta) - -import ( - "fmt" - "reflect" - - "github.com/gobwas/glob/match" - "github.com/gobwas/glob/syntax/ast" - "github.com/gobwas/glob/util/runes" -) - -func optimizeMatcher(matcher match.Matcher) match.Matcher { - switch m := matcher.(type) { - - case match.Any: - if len(m.Separators) == 0 { - return match.NewSuper() - } - - case match.AnyOf: - if len(m.Matchers) == 1 { - return m.Matchers[0] - } - - return m - - case match.List: - if m.Not == false && len(m.List) == 1 { - return match.NewText(string(m.List)) - } - - return m - - case match.BTree: - m.Left = optimizeMatcher(m.Left) - m.Right = optimizeMatcher(m.Right) - - r, ok := m.Value.(match.Text) - if !ok { - return m - } - - var ( - leftNil = m.Left == nil - rightNil = m.Right == nil - ) - if leftNil && rightNil { - return match.NewText(r.Str) - } - - _, leftSuper := m.Left.(match.Super) - lp, leftPrefix := m.Left.(match.Prefix) - la, leftAny := m.Left.(match.Any) - - _, rightSuper := m.Right.(match.Super) - rs, rightSuffix := m.Right.(match.Suffix) - ra, rightAny := m.Right.(match.Any) - - switch { - case leftSuper && rightSuper: - return match.NewContains(r.Str, false) - - case leftSuper && rightNil: - return match.NewSuffix(r.Str) - - case rightSuper && leftNil: - return match.NewPrefix(r.Str) - - case leftNil && rightSuffix: - return match.NewPrefixSuffix(r.Str, rs.Suffix) - - case rightNil && leftPrefix: - return match.NewPrefixSuffix(lp.Prefix, r.Str) - - case rightNil && leftAny: - return match.NewSuffixAny(r.Str, la.Separators) - - case leftNil && rightAny: - return match.NewPrefixAny(r.Str, ra.Separators) - } - - return m - } - - return matcher -} - -func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { - if len(matchers) == 0 { - return nil, fmt.Errorf("compile error: need at least one matcher") - } - if len(matchers) == 1 { - return matchers[0], nil - } - if m := glueMatchers(matchers); m != nil { - return m, nil - } - - idx := -1 - maxLen := -1 - var val match.Matcher - for i, matcher := range matchers { - if l := matcher.Len(); l != -1 && l >= maxLen { - maxLen = l - idx = i - val = matcher - } - } - - if val == nil { // not found matcher with static length - r, err := compileMatchers(matchers[1:]) - if err != nil { - return nil, err - } - return match.NewBTree(matchers[0], nil, r), nil - } - - left := matchers[:idx] - var right []match.Matcher - if len(matchers) > idx+1 { - right = matchers[idx+1:] - } - - var l, r match.Matcher - var err error - if len(left) > 0 { - l, err = compileMatchers(left) - if err != nil { - return nil, err - } - } - - if len(right) > 0 { - r, err = compileMatchers(right) - if err != nil { - return nil, err - } - } - - return match.NewBTree(val, l, r), nil -} - -func glueMatchers(matchers []match.Matcher) match.Matcher { - if m := glueMatchersAsEvery(matchers); m != nil { - return m - } - if m := glueMatchersAsRow(matchers); m 
!= nil { - return m - } - return nil -} - -func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { - if len(matchers) <= 1 { - return nil - } - - var ( - c []match.Matcher - l int - ) - for _, matcher := range matchers { - if ml := matcher.Len(); ml == -1 { - return nil - } else { - c = append(c, matcher) - l += ml - } - } - return match.NewRow(l, c...) -} - -func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { - if len(matchers) <= 1 { - return nil - } - - var ( - hasAny bool - hasSuper bool - hasSingle bool - min int - separator []rune - ) - - for i, matcher := range matchers { - var sep []rune - - switch m := matcher.(type) { - case match.Super: - sep = []rune{} - hasSuper = true - - case match.Any: - sep = m.Separators - hasAny = true - - case match.Single: - sep = m.Separators - hasSingle = true - min++ - - case match.List: - if !m.Not { - return nil - } - sep = m.List - hasSingle = true - min++ - - default: - return nil - } - - // initialize - if i == 0 { - separator = sep - } - - if runes.Equal(sep, separator) { - continue - } - - return nil - } - - if hasSuper && !hasAny && !hasSingle { - return match.NewSuper() - } - - if hasAny && !hasSuper && !hasSingle { - return match.NewAny(separator) - } - - if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { - return match.NewMin(min) - } - - every := match.NewEveryOf() - - if min > 0 { - every.Add(match.NewMin(min)) - - if !hasAny && !hasSuper { - every.Add(match.NewMax(min)) - } - } - - if len(separator) > 0 { - every.Add(match.NewContains(string(separator), true)) - } - - return every -} - -func minimizeMatchers(matchers []match.Matcher) []match.Matcher { - var done match.Matcher - var left, right, count int - - for l := 0; l < len(matchers); l++ { - for r := len(matchers); r > l; r-- { - if glued := glueMatchers(matchers[l:r]); glued != nil { - var swap bool - - if done == nil { - swap = true - } else { - cl, gl := done.Len(), glued.Len() - swap = cl > -1 && gl > -1 && gl > cl - swap = swap || count < r-l - } - - if swap { - done = glued - left = l - right = r - count = r - l - } - } - } - } - - if done == nil { - return matchers - } - - next := append(append([]match.Matcher{}, matchers[:left]...), done) - if right < len(matchers) { - next = append(next, matchers[right:]...) 
- } - - if len(next) == len(matchers) { - return next - } - - return minimizeMatchers(next) -} - -// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree -func minimizeTree(tree *ast.Node) *ast.Node { - switch tree.Kind { - case ast.KindAnyOf: - return minimizeTreeAnyOf(tree) - default: - return nil - } -} - -// minimizeAnyOf tries to find common children of given node of AnyOf pattern -// it searches for common children from left and from right -// if any common children are found – then it returns new optimized ast tree -// else it returns nil -func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { - if !areOfSameKind(tree.Children, ast.KindPattern) { - return nil - } - - commonLeft, commonRight := commonChildren(tree.Children) - commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) - if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts - return nil - } - - var result []*ast.Node - if commonLeftCount > 0 { - result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) - } - - var anyOf []*ast.Node - for _, child := range tree.Children { - reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] - var node *ast.Node - if len(reuse) == 0 { - // this pattern is completely reduced by commonLeft and commonRight patterns - // so it become nothing - node = ast.NewNode(ast.KindNothing, nil) - } else { - node = ast.NewNode(ast.KindPattern, nil, reuse...) - } - anyOf = appendIfUnique(anyOf, node) - } - switch { - case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: - result = append(result, anyOf[0]) - case len(anyOf) > 1: - result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) - } - - if commonRightCount > 0 { - result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) - } - - return ast.NewNode(ast.KindPattern, nil, result...) 
-} - -func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { - if len(nodes) <= 1 { - return - } - - // find node that has least number of children - idx := leastChildren(nodes) - if idx == -1 { - return - } - tree := nodes[idx] - treeLength := len(tree.Children) - - // allocate max able size for rightCommon slice - // to get ability insert elements in reverse order (from end to start) - // without sorting - commonRight = make([]*ast.Node, treeLength) - lastRight := treeLength // will use this to get results as commonRight[lastRight:] - - var ( - breakLeft bool - breakRight bool - commonTotal int - ) - for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { - treeLeft := tree.Children[i] - treeRight := tree.Children[j] - - for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { - // skip least children node - if k == idx { - continue - } - - restLeft := nodes[k].Children[i] - restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] - - breakLeft = breakLeft || !treeLeft.Equal(restLeft) - - // disable searching for right common parts, if left part is already overlapping - breakRight = breakRight || (!breakLeft && j <= i) - breakRight = breakRight || !treeRight.Equal(restRight) - } - - if !breakLeft { - commonTotal++ - commonLeft = append(commonLeft, treeLeft) - } - if !breakRight { - commonTotal++ - lastRight = j - commonRight[j] = treeRight - } - } - - commonRight = commonRight[lastRight:] - - return -} - -func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { - for _, n := range target { - if reflect.DeepEqual(n, val) { - return target - } - } - return append(target, val) -} - -func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { - for _, n := range nodes { - if n.Kind != kind { - return false - } - } - return true -} - -func leastChildren(nodes []*ast.Node) int { - min := -1 - idx := -1 - for i, n := range nodes { - if idx == -1 || (len(n.Children) < min) { - min = len(n.Children) - idx = i - } - } - return idx -} - -func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { - var matchers []match.Matcher - for _, desc := range tree.Children { - m, err := compile(desc, sep) - if err != nil { - return nil, err - } - matchers = append(matchers, optimizeMatcher(m)) - } - return matchers, nil -} - -func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { - switch tree.Kind { - case ast.KindAnyOf: - // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) - if n := minimizeTree(tree); n != nil { - return compile(n, sep) - } - matchers, err := compileTreeChildren(tree, sep) - if err != nil { - return nil, err - } - return match.NewAnyOf(matchers...), nil - - case ast.KindPattern: - if len(tree.Children) == 0 { - return match.NewNothing(), nil - } - matchers, err := compileTreeChildren(tree, sep) - if err != nil { - return nil, err - } - m, err = compileMatchers(minimizeMatchers(matchers)) - if err != nil { - return nil, err - } - - case ast.KindAny: - m = match.NewAny(sep) - - case ast.KindSuper: - m = match.NewSuper() - - case ast.KindSingle: - m = match.NewSingle(sep) - - case ast.KindNothing: - m = match.NewNothing() - - case ast.KindList: - l := tree.Value.(ast.List) - m = match.NewList([]rune(l.Chars), l.Not) - - case ast.KindRange: - r := tree.Value.(ast.Range) - m = match.NewRange(r.Lo, r.Hi, r.Not) - - case ast.KindText: - t := tree.Value.(ast.Text) - m = match.NewText(t.Text) - - default: - return nil, 
fmt.Errorf("could not compile tree: unknown node type")
-	}

-	return optimizeMatcher(m), nil
-}

-func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
-	m, err := compile(tree, sep)
-	if err != nil {
-		return nil, err
-	}

-	return m, nil
-}
diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go
deleted file mode 100644
index 2afde343a..000000000
--- a/vendor/github.com/gobwas/glob/glob.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package glob

-import (
-	"github.com/gobwas/glob/compiler"
-	"github.com/gobwas/glob/syntax"
-)

-// Glob represents compiled glob pattern.
-type Glob interface {
-	Match(string) bool
-}

-// Compile creates Glob for given pattern and strings (if any present after pattern) as separators.
-// The pattern syntax is:
-//
-//    pattern:
-//        { term }
-//
-//    term:
-//        `*`         matches any sequence of non-separator characters
-//        `**`        matches any sequence of characters
-//        `?`         matches any single non-separator character
-//        `[` [ `!` ] { character-range } `]`
-//                    character class (must be non-empty)
-//        `{` pattern-list `}`
-//                    pattern alternatives
-//        c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
-//        `\` c       matches character c
-//
-//    character-range:
-//        c           matches character c (c != `\\`, `-`, `]`)
-//        `\` c       matches character c
-//        lo `-` hi   matches character c for lo <= c <= hi
-//
-//    pattern-list:
-//        pattern { `,` pattern }
-//                    comma-separated (without spaces) patterns
-//
-func Compile(pattern string, separators ...rune) (Glob, error) {
-	ast, err := syntax.Parse(pattern)
-	if err != nil {
-		return nil, err
-	}

-	matcher, err := compiler.Compile(ast, separators)
-	if err != nil {
-		return nil, err
-	}

-	return matcher, nil
-}

-// MustCompile is the same as Compile, except that if Compile returns error, this will panic
-func MustCompile(pattern string, separators ...rune) Glob {
-	g, err := Compile(pattern, separators...)
-	if err != nil {
-		panic(err)
-	}

-	return g
-}

-// QuoteMeta returns a string that quotes all glob pattern meta characters
-// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
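The Compile doc comment above is effectively the whole contract of the removed package. A short sketch of the separator behaviour it describes; the pattern and paths are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// With '/' as separator, `*` stays within one path segment while `**`
	// may cross segments, per the syntax table above.
	g := glob.MustCompile("/srv/*/logs/**.log", '/')

	fmt.Println(g.Match("/srv/web/logs/access.log"))      // true
	fmt.Println(g.Match("/srv/web/logs/2018/access.log")) // true: `**` crosses '/'
	fmt.Println(g.Match("/srv/web/extra/dir/access.log")) // false: `*` cannot cross '/'

	// QuoteMeta escapes every glob metacharacter in its argument.
	fmt.Println(glob.QuoteMeta("{foo*}")) // \{foo\*\}
}
```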
-func QuoteMeta(s string) string { - b := make([]byte, 2*len(s)) - - // a byte loop is correct because all meta characters are ASCII - j := 0 - for i := 0; i < len(s); i++ { - if syntax.Special(s[i]) { - b[j] = '\\' - j++ - } - b[j] = s[i] - j++ - } - - return string(b[0:j]) -} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go deleted file mode 100644 index 514a9a5c4..000000000 --- a/vendor/github.com/gobwas/glob/match/any.go +++ /dev/null @@ -1,45 +0,0 @@ -package match - -import ( - "fmt" - "github.com/gobwas/glob/util/strings" -) - -type Any struct { - Separators []rune -} - -func NewAny(s []rune) Any { - return Any{s} -} - -func (self Any) Match(s string) bool { - return strings.IndexAnyRunes(s, self.Separators) == -1 -} - -func (self Any) Index(s string) (int, []int) { - found := strings.IndexAnyRunes(s, self.Separators) - switch found { - case -1: - case 0: - return 0, segments0 - default: - s = s[:found] - } - - segments := acquireSegments(len(s)) - for i := range s { - segments = append(segments, i) - } - segments = append(segments, len(s)) - - return 0, segments -} - -func (self Any) Len() int { - return lenNo -} - -func (self Any) String() string { - return fmt.Sprintf("", string(self.Separators)) -} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go deleted file mode 100644 index 8e65356cd..000000000 --- a/vendor/github.com/gobwas/glob/match/any_of.go +++ /dev/null @@ -1,82 +0,0 @@ -package match - -import "fmt" - -type AnyOf struct { - Matchers Matchers -} - -func NewAnyOf(m ...Matcher) AnyOf { - return AnyOf{Matchers(m)} -} - -func (self *AnyOf) Add(m Matcher) error { - self.Matchers = append(self.Matchers, m) - return nil -} - -func (self AnyOf) Match(s string) bool { - for _, m := range self.Matchers { - if m.Match(s) { - return true - } - } - - return false -} - -func (self AnyOf) Index(s string) (int, []int) { - index := -1 - - segments := acquireSegments(len(s)) - for _, m := range self.Matchers { - idx, seg := m.Index(s) - if idx == -1 { - continue - } - - if index == -1 || idx < index { - index = idx - segments = append(segments[:0], seg...) 
- continue - } - - if idx > index { - continue - } - - // here idx == index - segments = appendMerge(segments, seg) - } - - if index == -1 { - releaseSegments(segments) - return -1, nil - } - - return index, segments -} - -func (self AnyOf) Len() (l int) { - l = -1 - for _, m := range self.Matchers { - ml := m.Len() - switch { - case l == -1: - l = ml - continue - - case ml == -1: - return -1 - - case l != ml: - return -1 - } - } - - return -} - -func (self AnyOf) String() string { - return fmt.Sprintf("", self.Matchers) -} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go deleted file mode 100644 index 8302bf824..000000000 --- a/vendor/github.com/gobwas/glob/match/btree.go +++ /dev/null @@ -1,185 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type BTree struct { - Value Matcher - Left Matcher - Right Matcher - ValueLengthRunes int - LeftLengthRunes int - RightLengthRunes int - LengthRunes int -} - -func NewBTree(Value, Left, Right Matcher) (tree BTree) { - tree.Value = Value - tree.Left = Left - tree.Right = Right - - lenOk := true - if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { - lenOk = false - } - - if Left != nil { - if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { - lenOk = false - } - } - - if Right != nil { - if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { - lenOk = false - } - } - - if lenOk { - tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes - } else { - tree.LengthRunes = -1 - } - - return tree -} - -func (self BTree) Len() int { - return self.LengthRunes -} - -// todo? -func (self BTree) Index(s string) (index int, segments []int) { - //inputLen := len(s) - //// try to cut unnecessary parts - //// by knowledge of length of right and left part - //offset, limit := self.offsetLimit(inputLen) - //for offset < limit { - // // search for matching part in substring - // vi, segments := self.Value.Index(s[offset:limit]) - // if index == -1 { - // return -1, nil - // } - // if self.Left == nil { - // if index != offset { - // return -1, nil - // } - // } else { - // left := s[:offset+vi] - // i := self.Left.IndexSuffix(left) - // if i == -1 { - // return -1, nil - // } - // index = i - // } - // if self.Right != nil { - // for _, seg := range segments { - // right := s[:offset+vi+seg] - // } - // } - - // l := s[:offset+index] - // var left bool - // if self.Left != nil { - // left = self.Left.Index(l) - // } else { - // left = l == "" - // } - //} - - return -1, nil -} - -func (self BTree) Match(s string) bool { - inputLen := len(s) - // try to cut unnecessary parts - // by knowledge of length of right and left part - offset, limit := self.offsetLimit(inputLen) - - for offset < limit { - // search for matching part in substring - index, segments := self.Value.Index(s[offset:limit]) - if index == -1 { - releaseSegments(segments) - return false - } - - l := s[:offset+index] - var left bool - if self.Left != nil { - left = self.Left.Match(l) - } else { - left = l == "" - } - - if left { - for i := len(segments) - 1; i >= 0; i-- { - length := segments[i] - - var right bool - var r string - // if there is no string for the right branch - if inputLen <= offset+index+length { - r = "" - } else { - r = s[offset+index+length:] - } - - if self.Right != nil { - right = self.Right.Match(r) - } else { - right = r == "" - } - - if right { - releaseSegments(segments) - return true - } - } - } - - _, step := 
utf8.DecodeRuneInString(s[offset+index:]) - offset += index + step - - releaseSegments(segments) - } - - return false -} - -func (self BTree) offsetLimit(inputLen int) (offset int, limit int) { - // self.Length, self.RLen and self.LLen are values meaning the length of runes for each part - // here we manipulating byte length for better optimizations - // but these checks still works, cause minLen of 1-rune string is 1 byte. - if self.LengthRunes != -1 && self.LengthRunes > inputLen { - return 0, 0 - } - if self.LeftLengthRunes >= 0 { - offset = self.LeftLengthRunes - } - if self.RightLengthRunes >= 0 { - limit = inputLen - self.RightLengthRunes - } else { - limit = inputLen - } - return offset, limit -} - -func (self BTree) String() string { - const n string = "" - var l, r string - if self.Left == nil { - l = n - } else { - l = self.Left.String() - } - if self.Right == nil { - r = n - } else { - r = self.Right.String() - } - - return fmt.Sprintf("%s]>", l, self.Value, r) -} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go deleted file mode 100644 index 0998e95b0..000000000 --- a/vendor/github.com/gobwas/glob/match/contains.go +++ /dev/null @@ -1,58 +0,0 @@ -package match - -import ( - "fmt" - "strings" -) - -type Contains struct { - Needle string - Not bool -} - -func NewContains(needle string, not bool) Contains { - return Contains{needle, not} -} - -func (self Contains) Match(s string) bool { - return strings.Contains(s, self.Needle) != self.Not -} - -func (self Contains) Index(s string) (int, []int) { - var offset int - - idx := strings.Index(s, self.Needle) - - if !self.Not { - if idx == -1 { - return -1, nil - } - - offset = idx + len(self.Needle) - if len(s) <= offset { - return 0, []int{offset} - } - s = s[offset:] - } else if idx != -1 { - s = s[:idx] - } - - segments := acquireSegments(len(s) + 1) - for i := range s { - segments = append(segments, offset+i) - } - - return 0, append(segments, offset+len(s)) -} - -func (self Contains) Len() int { - return lenNo -} - -func (self Contains) String() string { - var not string - if self.Not { - not = "!" - } - return fmt.Sprintf("", not, self.Needle) -} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go deleted file mode 100644 index 7c968ee36..000000000 --- a/vendor/github.com/gobwas/glob/match/every_of.go +++ /dev/null @@ -1,99 +0,0 @@ -package match - -import ( - "fmt" -) - -type EveryOf struct { - Matchers Matchers -} - -func NewEveryOf(m ...Matcher) EveryOf { - return EveryOf{Matchers(m)} -} - -func (self *EveryOf) Add(m Matcher) error { - self.Matchers = append(self.Matchers, m) - return nil -} - -func (self EveryOf) Len() (l int) { - for _, m := range self.Matchers { - if ml := m.Len(); l > 0 { - l += ml - } else { - return -1 - } - } - - return -} - -func (self EveryOf) Index(s string) (int, []int) { - var index int - var offset int - - // make `in` with cap as len(s), - // cause it is the maximum size of output segments values - next := acquireSegments(len(s)) - current := acquireSegments(len(s)) - - sub := s - for i, m := range self.Matchers { - idx, seg := m.Index(sub) - if idx == -1 { - releaseSegments(next) - releaseSegments(current) - return -1, nil - } - - if i == 0 { - // we use copy here instead of `current = seg` - // cause seg is a slice from reusable buffer `in` - // and it could be overwritten in next iteration - current = append(current, seg...) 
- } else { - // clear the next - next = next[:0] - - delta := index - (idx + offset) - for _, ex := range current { - for _, n := range seg { - if ex+delta == n { - next = append(next, n) - } - } - } - - if len(next) == 0 { - releaseSegments(next) - releaseSegments(current) - return -1, nil - } - - current = append(current[:0], next...) - } - - index = idx + offset - sub = s[index:] - offset += idx - } - - releaseSegments(next) - - return index, current -} - -func (self EveryOf) Match(s string) bool { - for _, m := range self.Matchers { - if !m.Match(s) { - return false - } - } - - return true -} - -func (self EveryOf) String() string { - return fmt.Sprintf("", self.Matchers) -} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go deleted file mode 100644 index 7fd763ecd..000000000 --- a/vendor/github.com/gobwas/glob/match/list.go +++ /dev/null @@ -1,49 +0,0 @@ -package match - -import ( - "fmt" - "github.com/gobwas/glob/util/runes" - "unicode/utf8" -) - -type List struct { - List []rune - Not bool -} - -func NewList(list []rune, not bool) List { - return List{list, not} -} - -func (self List) Match(s string) bool { - r, w := utf8.DecodeRuneInString(s) - if len(s) > w { - return false - } - - inList := runes.IndexRune(self.List, r) != -1 - return inList == !self.Not -} - -func (self List) Len() int { - return lenOne -} - -func (self List) Index(s string) (int, []int) { - for i, r := range s { - if self.Not == (runes.IndexRune(self.List, r) == -1) { - return i, segmentsByRuneLength[utf8.RuneLen(r)] - } - } - - return -1, nil -} - -func (self List) String() string { - var not string - if self.Not { - not = "!" - } - - return fmt.Sprintf("", not, string(self.List)) -} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go deleted file mode 100644 index f80e007fb..000000000 --- a/vendor/github.com/gobwas/glob/match/match.go +++ /dev/null @@ -1,81 +0,0 @@ -package match - -// todo common table of rune's length - -import ( - "fmt" - "strings" -) - -const lenOne = 1 -const lenZero = 0 -const lenNo = -1 - -type Matcher interface { - Match(string) bool - Index(string) (int, []int) - Len() int - String() string -} - -type Matchers []Matcher - -func (m Matchers) String() string { - var s []string - for _, matcher := range m { - s = append(s, fmt.Sprint(matcher)) - } - - return fmt.Sprintf("%s", strings.Join(s, ",")) -} - -// appendMerge merges and sorts given already SORTED and UNIQUE segments. -func appendMerge(target, sub []int) []int { - lt, ls := len(target), len(sub) - out := make([]int, 0, lt+ls) - - for x, y := 0, 0; x < lt || y < ls; { - if x >= lt { - out = append(out, sub[y:]...) - break - } - - if y >= ls { - out = append(out, target[x:]...) - break - } - - xValue := target[x] - yValue := sub[y] - - switch { - - case xValue == yValue: - out = append(out, xValue) - x++ - y++ - - case xValue < yValue: - out = append(out, xValue) - x++ - - case yValue < xValue: - out = append(out, yValue) - y++ - - } - } - - target = append(target[:0], out...) 
- - return target -} - -func reverseSegments(input []int) { - l := len(input) - m := l / 2 - - for i := 0; i < m; i++ { - input[i], input[l-i-1] = input[l-i-1], input[i] - } -} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go deleted file mode 100644 index d72f69eff..000000000 --- a/vendor/github.com/gobwas/glob/match/max.go +++ /dev/null @@ -1,49 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type Max struct { - Limit int -} - -func NewMax(l int) Max { - return Max{l} -} - -func (self Max) Match(s string) bool { - var l int - for range s { - l += 1 - if l > self.Limit { - return false - } - } - - return true -} - -func (self Max) Index(s string) (int, []int) { - segments := acquireSegments(self.Limit + 1) - segments = append(segments, 0) - var count int - for i, r := range s { - count++ - if count > self.Limit { - break - } - segments = append(segments, i+utf8.RuneLen(r)) - } - - return 0, segments -} - -func (self Max) Len() int { - return lenNo -} - -func (self Max) String() string { - return fmt.Sprintf("", self.Limit) -} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go deleted file mode 100644 index db57ac8eb..000000000 --- a/vendor/github.com/gobwas/glob/match/min.go +++ /dev/null @@ -1,57 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type Min struct { - Limit int -} - -func NewMin(l int) Min { - return Min{l} -} - -func (self Min) Match(s string) bool { - var l int - for range s { - l += 1 - if l >= self.Limit { - return true - } - } - - return false -} - -func (self Min) Index(s string) (int, []int) { - var count int - - c := len(s) - self.Limit + 1 - if c <= 0 { - return -1, nil - } - - segments := acquireSegments(c) - for i, r := range s { - count++ - if count >= self.Limit { - segments = append(segments, i+utf8.RuneLen(r)) - } - } - - if len(segments) == 0 { - return -1, nil - } - - return 0, segments -} - -func (self Min) Len() int { - return lenNo -} - -func (self Min) String() string { - return fmt.Sprintf("", self.Limit) -} diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go deleted file mode 100644 index 0d4ecd36b..000000000 --- a/vendor/github.com/gobwas/glob/match/nothing.go +++ /dev/null @@ -1,27 +0,0 @@ -package match - -import ( - "fmt" -) - -type Nothing struct{} - -func NewNothing() Nothing { - return Nothing{} -} - -func (self Nothing) Match(s string) bool { - return len(s) == 0 -} - -func (self Nothing) Index(s string) (int, []int) { - return 0, segments0 -} - -func (self Nothing) Len() int { - return lenZero -} - -func (self Nothing) String() string { - return fmt.Sprintf("") -} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go deleted file mode 100644 index a7347250e..000000000 --- a/vendor/github.com/gobwas/glob/match/prefix.go +++ /dev/null @@ -1,50 +0,0 @@ -package match - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type Prefix struct { - Prefix string -} - -func NewPrefix(p string) Prefix { - return Prefix{p} -} - -func (self Prefix) Index(s string) (int, []int) { - idx := strings.Index(s, self.Prefix) - if idx == -1 { - return -1, nil - } - - length := len(self.Prefix) - var sub string - if len(s) > idx+length { - sub = s[idx+length:] - } else { - sub = "" - } - - segments := acquireSegments(len(sub) + 1) - segments = append(segments, length) - for i, r := range sub { - segments = append(segments, 
length+i+utf8.RuneLen(r))
-	}
-
-	return idx, segments
-}
-
-func (self Prefix) Len() int {
-	return lenNo
-}
-
-func (self Prefix) Match(s string) bool {
-	return strings.HasPrefix(s, self.Prefix)
-}
-
-func (self Prefix) String() string {
-	return fmt.Sprintf("<prefix:%s>", self.Prefix)
-}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go
deleted file mode 100644
index 8ee58fe1b..000000000
--- a/vendor/github.com/gobwas/glob/match/prefix_any.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-	"unicode/utf8"
-
-	sutil "github.com/gobwas/glob/util/strings"
-)
-
-type PrefixAny struct {
-	Prefix     string
-	Separators []rune
-}
-
-func NewPrefixAny(s string, sep []rune) PrefixAny {
-	return PrefixAny{s, sep}
-}
-
-func (self PrefixAny) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Prefix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	n := len(self.Prefix)
-	sub := s[idx+n:]
-	i := sutil.IndexAnyRunes(sub, self.Separators)
-	if i > -1 {
-		sub = sub[:i]
-	}
-
-	seg := acquireSegments(len(sub) + 1)
-	seg = append(seg, n)
-	for i, r := range sub {
-		seg = append(seg, n+i+utf8.RuneLen(r))
-	}
-
-	return idx, seg
-}
-
-func (self PrefixAny) Len() int {
-	return lenNo
-}
-
-func (self PrefixAny) Match(s string) bool {
-	if !strings.HasPrefix(s, self.Prefix) {
-		return false
-	}
-	return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
-}
-
-func (self PrefixAny) String() string {
-	return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
-}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
deleted file mode 100644
index 8208085a1..000000000
--- a/vendor/github.com/gobwas/glob/match/prefix_suffix.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-)
-
-type PrefixSuffix struct {
-	Prefix, Suffix string
-}
-
-func NewPrefixSuffix(p, s string) PrefixSuffix {
-	return PrefixSuffix{p, s}
-}
-
-func (self PrefixSuffix) Index(s string) (int, []int) {
-	prefixIdx := strings.Index(s, self.Prefix)
-	if prefixIdx == -1 {
-		return -1, nil
-	}
-
-	suffixLen := len(self.Suffix)
-	if suffixLen <= 0 {
-		return prefixIdx, []int{len(s) - prefixIdx}
-	}
-
-	if (len(s) - prefixIdx) <= 0 {
-		return -1, nil
-	}
-
-	segments := acquireSegments(len(s) - prefixIdx)
-	for sub := s[prefixIdx:]; ; {
-		suffixIdx := strings.LastIndex(sub, self.Suffix)
-		if suffixIdx == -1 {
-			break
-		}
-
-		segments = append(segments, suffixIdx+suffixLen)
-		sub = sub[:suffixIdx]
-	}
-
-	if len(segments) == 0 {
-		releaseSegments(segments)
-		return -1, nil
-	}
-
-	reverseSegments(segments)
-
-	return prefixIdx, segments
-}
-
-func (self PrefixSuffix) Len() int {
-	return lenNo
-}
-
-func (self PrefixSuffix) Match(s string) bool {
-	return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
-}
-
-func (self PrefixSuffix) String() string {
-	return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
-}
diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go
deleted file mode 100644
index ce30245a4..000000000
--- a/vendor/github.com/gobwas/glob/match/range.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"unicode/utf8"
-)
-
-type Range struct {
-	Lo, Hi rune
-	Not    bool
-}
-
-func NewRange(lo, hi rune, not bool) Range {
-	return Range{lo, hi, not}
-}
-
-func (self Range) Len() int {
-	return lenOne
-}
-
-func (self Range) Match(s string) bool {
-	r, w := 
utf8.DecodeRuneInString(s)
-	if len(s) > w {
-		return false
-	}
-
-	inRange := r >= self.Lo && r <= self.Hi
-
-	return inRange == !self.Not
-}
-
-func (self Range) Index(s string) (int, []int) {
-	for i, r := range s {
-		if self.Not != (r >= self.Lo && r <= self.Hi) {
-			return i, segmentsByRuneLength[utf8.RuneLen(r)]
-		}
-	}
-
-	return -1, nil
-}
-
-func (self Range) String() string {
-	var not string
-	if self.Not {
-		not = "!"
-	}
-	return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi))
-}
diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go
deleted file mode 100644
index 4379042e4..000000000
--- a/vendor/github.com/gobwas/glob/match/row.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type Row struct {
-	Matchers    Matchers
-	RunesLength int
-	Segments    []int
-}
-
-func NewRow(len int, m ...Matcher) Row {
-	return Row{
-		Matchers:    Matchers(m),
-		RunesLength: len,
-		Segments:    []int{len},
-	}
-}
-
-func (self Row) matchAll(s string) bool {
-	var idx int
-	for _, m := range self.Matchers {
-		length := m.Len()
-
-		var next, i int
-		for next = range s[idx:] {
-			i++
-			if i == length {
-				break
-			}
-		}
-
-		if i < length || !m.Match(s[idx:idx+next+1]) {
-			return false
-		}
-
-		idx += next + 1
-	}
-
-	return true
-}
-
-func (self Row) lenOk(s string) bool {
-	var i int
-	for range s {
-		i++
-		if i > self.RunesLength {
-			return false
-		}
-	}
-	return self.RunesLength == i
-}
-
-func (self Row) Match(s string) bool {
-	return self.lenOk(s) && self.matchAll(s)
-}
-
-func (self Row) Len() (l int) {
-	return self.RunesLength
-}
-
-func (self Row) Index(s string) (int, []int) {
-	for i := range s {
-		if len(s[i:]) < self.RunesLength {
-			break
-		}
-		if self.matchAll(s[i:]) {
-			return i, self.Segments
-		}
-	}
-	return -1, nil
-}
-
-func (self Row) String() string {
-	return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
-}
diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go
deleted file mode 100644
index 9ea6f3094..000000000
--- a/vendor/github.com/gobwas/glob/match/segments.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package match
-
-import (
-	"sync"
-)
-
-type SomePool interface {
-	Get() []int
-	Put([]int)
-}
-
-var segmentsPools [1024]sync.Pool
-
-func toPowerOfTwo(v int) int {
-	v--
-	v |= v >> 1
-	v |= v >> 2
-	v |= v >> 4
-	v |= v >> 8
-	v |= v >> 16
-	v++
-
-	return v
-}
-
-const (
-	cacheFrom             = 16
-	cacheToAndHigher      = 1024
-	cacheFromIndex        = 15
-	cacheToAndHigherIndex = 1023
-)
-
-var (
-	segments0 = []int{0}
-	segments1 = []int{1}
-	segments2 = []int{2}
-	segments3 = []int{3}
-	segments4 = []int{4}
-)
-
-var segmentsByRuneLength [5][]int = [5][]int{
-	0: segments0,
-	1: segments1,
-	2: segments2,
-	3: segments3,
-	4: segments4,
-}
-
-func init() {
-	for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
-		func(i int) {
-			segmentsPools[i-1] = sync.Pool{New: func() interface{} {
-				return make([]int, 0, i)
-			}}
-		}(i)
-	}
-}
-
-func getTableIndex(c int) int {
-	p := toPowerOfTwo(c)
-	switch {
-	case p >= cacheToAndHigher:
-		return cacheToAndHigherIndex
-	case p <= cacheFrom:
-		return cacheFromIndex
-	default:
-		return p - 1
-	}
-}
-
-func acquireSegments(c int) []int {
-	// make []int with less capacity than cacheFrom
-	// is faster than acquiring it from pool
-	if c < cacheFrom {
-		return make([]int, 0, c)
-	}
-
-	return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
-}
-
-func releaseSegments(s []int) {
-	c := cap(s)
-
-	// make []int with less capacity than 
cacheFrom
-	// is faster than acquiring it from pool
-	if c < cacheFrom {
-		return
-	}
-
-	segmentsPools[getTableIndex(c)].Put(s)
-}
diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go
deleted file mode 100644
index ee6e3954c..000000000
--- a/vendor/github.com/gobwas/glob/match/single.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"github.com/gobwas/glob/util/runes"
-	"unicode/utf8"
-)
-
-// single represents ?
-type Single struct {
-	Separators []rune
-}
-
-func NewSingle(s []rune) Single {
-	return Single{s}
-}
-
-func (self Single) Match(s string) bool {
-	r, w := utf8.DecodeRuneInString(s)
-	if len(s) > w {
-		return false
-	}
-
-	return runes.IndexRune(self.Separators, r) == -1
-}
-
-func (self Single) Len() int {
-	return lenOne
-}
-
-func (self Single) Index(s string) (int, []int) {
-	for i, r := range s {
-		if runes.IndexRune(self.Separators, r) == -1 {
-			return i, segmentsByRuneLength[utf8.RuneLen(r)]
-		}
-	}
-
-	return -1, nil
-}
-
-func (self Single) String() string {
-	return fmt.Sprintf("<single:![%s]>", string(self.Separators))
-}
diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go
deleted file mode 100644
index 85bea8c68..000000000
--- a/vendor/github.com/gobwas/glob/match/suffix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-)
-
-type Suffix struct {
-	Suffix string
-}
-
-func NewSuffix(s string) Suffix {
-	return Suffix{s}
-}
-
-func (self Suffix) Len() int {
-	return lenNo
-}
-
-func (self Suffix) Match(s string) bool {
-	return strings.HasSuffix(s, self.Suffix)
-}
-
-func (self Suffix) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Suffix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	return 0, []int{idx + len(self.Suffix)}
-}
-
-func (self Suffix) String() string {
-	return fmt.Sprintf("<suffix:%s>", self.Suffix)
-}
diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go
deleted file mode 100644
index c5106f819..000000000
--- a/vendor/github.com/gobwas/glob/match/suffix_any.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-
-	sutil "github.com/gobwas/glob/util/strings"
-)
-
-type SuffixAny struct {
-	Suffix     string
-	Separators []rune
-}
-
-func NewSuffixAny(s string, sep []rune) SuffixAny {
-	return SuffixAny{s, sep}
-}
-
-func (self SuffixAny) Index(s string) (int, []int) {
-	idx := strings.Index(s, self.Suffix)
-	if idx == -1 {
-		return -1, nil
-	}
-
-	i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
-
-	return i, []int{idx + len(self.Suffix) - i}
-}
-
-func (self SuffixAny) Len() int {
-	return lenNo
-}
-
-func (self SuffixAny) Match(s string) bool {
-	if !strings.HasSuffix(s, self.Suffix) {
-		return false
-	}
-	return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
-}
-
-func (self SuffixAny) String() string {
-	return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
-}
diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go
deleted file mode 100644
index 3875950bb..000000000
--- a/vendor/github.com/gobwas/glob/match/super.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package match
-
-import (
-	"fmt"
-)
-
-type Super struct{}
-
-func NewSuper() Super {
-	return Super{}
-}
-
-func (self Super) Match(s string) bool {
-	return true
-}
-
-func (self Super) Len() int {
-	return lenNo
-}
-
-func (self Super) Index(s string) (int, []int) {
-	segments := 
acquireSegments(len(s) + 1)
-	for i := range s {
-		segments = append(segments, i)
-	}
-	segments = append(segments, len(s))
-
-	return 0, segments
-}
-
-func (self Super) String() string {
-	return fmt.Sprintf("<super>")
-}
diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go
deleted file mode 100644
index 0a17616d3..000000000
--- a/vendor/github.com/gobwas/glob/match/text.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package match
-
-import (
-	"fmt"
-	"strings"
-	"unicode/utf8"
-)
-
-// raw represents raw string to match
-type Text struct {
-	Str         string
-	RunesLength int
-	BytesLength int
-	Segments    []int
-}
-
-func NewText(s string) Text {
-	return Text{
-		Str:         s,
-		RunesLength: utf8.RuneCountInString(s),
-		BytesLength: len(s),
-		Segments:    []int{len(s)},
-	}
-}
-
-func (self Text) Match(s string) bool {
-	return self.Str == s
-}
-
-func (self Text) Len() int {
-	return self.RunesLength
-}
-
-func (self Text) Index(s string) (int, []int) {
-	index := strings.Index(s, self.Str)
-	if index == -1 {
-		return -1, nil
-	}
-
-	return index, self.Segments
-}
-
-func (self Text) String() string {
-	return fmt.Sprintf("<text:`%s`>", self.Str)
-}
diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md
deleted file mode 100644
index f58144e73..000000000
--- a/vendor/github.com/gobwas/glob/readme.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# glob.[go](https://golang.org)
-
-[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
-
-> Go Globbing Library.
-
-## Install
-
-```shell
-    go get github.com/gobwas/glob
-```
-
-## Example
-
-```go
-
-package main
-
-import "github.com/gobwas/glob"
-
-func main() {
-	var g glob.Glob
-
-	// create simple glob
-	g = glob.MustCompile("*.github.com")
-	g.Match("api.github.com") // true
-
-	// quote meta characters and then create simple glob
-	g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
-	g.Match("*.github.com") // true
-
-	// create new glob with set of delimiters as ["."]
-	g = glob.MustCompile("api.*.com", '.')
-	g.Match("api.github.com") // true
-	g.Match("api.gi.hub.com") // false
-
-	// create new glob with set of delimiters as ["."]
-	// but now with super wildcard
-	g = glob.MustCompile("api.**.com", '.')
-	g.Match("api.github.com") // true
-	g.Match("api.gi.hub.com") // true
-
-	// create glob with single symbol wildcard
-	g = glob.MustCompile("?at")
-	g.Match("cat") // true
-	g.Match("fat") // true
-	g.Match("at") // false
-
-	// create glob with single symbol wildcard and delimiters ['f']
-	g = glob.MustCompile("?at", 'f')
-	g.Match("cat") // true
-	g.Match("fat") // false
-	g.Match("at") // false
-
-	// create glob with character-list matchers
-	g = glob.MustCompile("[abc]at")
-	g.Match("cat") // true
-	g.Match("bat") // true
-	g.Match("fat") // false
-	g.Match("at") // false
-
-	// create glob with character-list matchers
-	g = glob.MustCompile("[!abc]at")
-	g.Match("cat") // false
-	g.Match("bat") // false
-	g.Match("fat") // true
-	g.Match("at") // false
-
-	// create glob with character-range matchers
-	g = glob.MustCompile("[a-c]at")
-	g.Match("cat") // true
-	g.Match("bat") // true
-	g.Match("fat") // false
-	g.Match("at") // false
-
-	// create glob with character-range matchers
-	g = glob.MustCompile("[!a-c]at")
-	g.Match("cat") // false
-	g.Match("bat") // false
-	g.Match("fat") // true
-	g.Match("at") // false
-
-	// create glob with pattern-alternatives list
-	g = glob.MustCompile("{cat,bat,[fr]at}")
-	g.Match("cat") // true
-	g.Match("bat") // true
-	
g.Match("fat") // true
-	g.Match("rat") // true
-	g.Match("at") // false
-	g.Match("zat") // false
-}
-
-```
-
-## Performance
-
-This library is designed for compile-once patterns: compilation may take some time, but
-matching a string is then much faster than re-parsing the pattern on every call.
-
-If you do not reuse the compiled `glob.Glob` object and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower.
-
-Run `go test -bench=.` from the source root to see the benchmarks:
-
-Pattern | Fixture | Match | Speed (ns/op)
---------|---------|-------|--------------
-`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
-`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
-`https://*.google.*` | `https://account.google.com` | `true` | 96
-`https://*.google.*` | `https://google.com` | `false` | 66
-`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
-`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
-`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
-`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
-`abc*` | `abcdef` | `true` | 8.15
-`abc*` | `af` | `false` | 5.68
-`*def` | `abcdef` | `true` | 8.84
-`*def` | `af` | `false` | 5.74
-`ab*ef` | `abcdef` | `true` | 15.2
-`ab*ef` | `af` | `false` | 10.4
-
-The same patterns with the `regexp` package:
-
-Pattern | Fixture | Match | Speed (ns/op)
---------|---------|-------|--------------
-`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
-`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
-`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
-`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
-`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
-`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
-`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
-`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
-`^abc.*$` | `abcdef` | `true` | 237
-`^abc.*$` | `af` | `false` | 100
-`^.*def$` | `abcdef` | `true` | 464
-`^.*def$` | `af` | `false` | 265
-`^ab.*ef$` | `abcdef` | `true` | 375
-`^ab.*ef$` | `af` | `false` | 145
-
-[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/glob
-[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
-[travis-url]: https://travis-ci.org/gobwas/glob
-
-## Syntax
-
-Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
-except that `**` (the super-asterisk) is not sensitive to separators.
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go deleted file mode 100644 index 3220a694a..000000000 --- a/vendor/github.com/gobwas/glob/syntax/ast/ast.go +++ /dev/null @@ -1,122 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -type Node struct { - Parent *Node - Children []*Node - Value interface{} - Kind Kind -} - -func NewNode(k Kind, v interface{}, ch ...*Node) *Node { - n := &Node{ - Kind: k, - Value: v, - } - for _, c := range ch { - Insert(n, c) - } - return n -} - -func (a *Node) Equal(b *Node) bool { - if a.Kind != b.Kind { - return false - } - if a.Value != b.Value { - return false - } - if len(a.Children) != len(b.Children) { - return false - } - for i, c := range a.Children { - if !c.Equal(b.Children[i]) { - return false - } - } - return true -} - -func (a *Node) String() string { - var buf bytes.Buffer - buf.WriteString(a.Kind.String()) - if a.Value != nil { - buf.WriteString(" =") - buf.WriteString(fmt.Sprintf("%v", a.Value)) - } - if len(a.Children) > 0 { - buf.WriteString(" [") - for i, c := range a.Children { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(c.String()) - } - buf.WriteString("]") - } - return buf.String() -} - -func Insert(parent *Node, children ...*Node) { - parent.Children = append(parent.Children, children...) - for _, ch := range children { - ch.Parent = parent - } -} - -type List struct { - Not bool - Chars string -} - -type Range struct { - Not bool - Lo, Hi rune -} - -type Text struct { - Text string -} - -type Kind int - -const ( - KindNothing Kind = iota - KindPattern - KindList - KindRange - KindText - KindAny - KindSuper - KindSingle - KindAnyOf -) - -func (k Kind) String() string { - switch k { - case KindNothing: - return "Nothing" - case KindPattern: - return "Pattern" - case KindList: - return "List" - case KindRange: - return "Range" - case KindText: - return "Text" - case KindAny: - return "Any" - case KindSuper: - return "Super" - case KindSingle: - return "Single" - case KindAnyOf: - return "AnyOf" - default: - return "" - } -} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go deleted file mode 100644 index 429b40943..000000000 --- a/vendor/github.com/gobwas/glob/syntax/ast/parser.go +++ /dev/null @@ -1,157 +0,0 @@ -package ast - -import ( - "errors" - "fmt" - "github.com/gobwas/glob/syntax/lexer" - "unicode/utf8" -) - -type Lexer interface { - Next() lexer.Token -} - -type parseFn func(*Node, Lexer) (parseFn, *Node, error) - -func Parse(lexer Lexer) (*Node, error) { - var parser parseFn - - root := NewNode(KindPattern, nil) - - var ( - tree *Node - err error - ) - for parser, tree = parserMain, root; parser != nil; { - parser, tree, err = parser(tree, lexer) - if err != nil { - return nil, err - } - } - - return root, nil -} - -func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { - for { - token := lex.Next() - switch token.Type { - case lexer.EOF: - return nil, tree, nil - - case lexer.Error: - return nil, tree, errors.New(token.Raw) - - case lexer.Text: - Insert(tree, NewNode(KindText, Text{token.Raw})) - return parserMain, tree, nil - - case lexer.Any: - Insert(tree, NewNode(KindAny, nil)) - return parserMain, tree, nil - - case lexer.Super: - Insert(tree, NewNode(KindSuper, nil)) - return parserMain, tree, nil - - case lexer.Single: - Insert(tree, NewNode(KindSingle, nil)) - return parserMain, tree, nil - - case lexer.RangeOpen: - return parserRange, 
tree, nil - - case lexer.TermsOpen: - a := NewNode(KindAnyOf, nil) - Insert(tree, a) - - p := NewNode(KindPattern, nil) - Insert(a, p) - - return parserMain, p, nil - - case lexer.Separator: - p := NewNode(KindPattern, nil) - Insert(tree.Parent, p) - - return parserMain, p, nil - - case lexer.TermsClose: - return parserMain, tree.Parent.Parent, nil - - default: - return nil, tree, fmt.Errorf("unexpected token: %s", token) - } - } - return nil, tree, fmt.Errorf("unknown error") -} - -func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { - var ( - not bool - lo rune - hi rune - chars string - ) - for { - token := lex.Next() - switch token.Type { - case lexer.EOF: - return nil, tree, errors.New("unexpected end") - - case lexer.Error: - return nil, tree, errors.New(token.Raw) - - case lexer.Not: - not = true - - case lexer.RangeLo: - r, w := utf8.DecodeRuneInString(token.Raw) - if len(token.Raw) > w { - return nil, tree, fmt.Errorf("unexpected length of lo character") - } - lo = r - - case lexer.RangeBetween: - // - - case lexer.RangeHi: - r, w := utf8.DecodeRuneInString(token.Raw) - if len(token.Raw) > w { - return nil, tree, fmt.Errorf("unexpected length of lo character") - } - - hi = r - - if hi < lo { - return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) - } - - case lexer.Text: - chars = token.Raw - - case lexer.RangeClose: - isRange := lo != 0 && hi != 0 - isChars := chars != "" - - if isChars == isRange { - return nil, tree, fmt.Errorf("could not parse range") - } - - if isRange { - Insert(tree, NewNode(KindRange, Range{ - Lo: lo, - Hi: hi, - Not: not, - })) - } else { - Insert(tree, NewNode(KindList, List{ - Chars: chars, - Not: not, - })) - } - - return parserMain, tree, nil - } - } -} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go deleted file mode 100644 index a1c8d1962..000000000 --- a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go +++ /dev/null @@ -1,273 +0,0 @@ -package lexer - -import ( - "bytes" - "fmt" - "github.com/gobwas/glob/util/runes" - "unicode/utf8" -) - -const ( - char_any = '*' - char_comma = ',' - char_single = '?' - char_escape = '\\' - char_range_open = '[' - char_range_close = ']' - char_terms_open = '{' - char_terms_close = '}' - char_range_not = '!' 
- char_range_between = '-' -) - -var specials = []byte{ - char_any, - char_single, - char_escape, - char_range_open, - char_range_close, - char_terms_open, - char_terms_close, -} - -func Special(c byte) bool { - return bytes.IndexByte(specials, c) != -1 -} - -type tokens []Token - -func (i *tokens) shift() (ret Token) { - ret = (*i)[0] - copy(*i, (*i)[1:]) - *i = (*i)[:len(*i)-1] - return -} - -func (i *tokens) push(v Token) { - *i = append(*i, v) -} - -func (i *tokens) empty() bool { - return len(*i) == 0 -} - -var eof rune = 0 - -type lexer struct { - data string - pos int - err error - - tokens tokens - termsLevel int - - lastRune rune - lastRuneSize int - hasRune bool -} - -func NewLexer(source string) *lexer { - l := &lexer{ - data: source, - tokens: tokens(make([]Token, 0, 4)), - } - return l -} - -func (l *lexer) Next() Token { - if l.err != nil { - return Token{Error, l.err.Error()} - } - if !l.tokens.empty() { - return l.tokens.shift() - } - - l.fetchItem() - return l.Next() -} - -func (l *lexer) peek() (r rune, w int) { - if l.pos == len(l.data) { - return eof, 0 - } - - r, w = utf8.DecodeRuneInString(l.data[l.pos:]) - if r == utf8.RuneError { - l.errorf("could not read rune") - r = eof - w = 0 - } - - return -} - -func (l *lexer) read() rune { - if l.hasRune { - l.hasRune = false - l.seek(l.lastRuneSize) - return l.lastRune - } - - r, s := l.peek() - l.seek(s) - - l.lastRune = r - l.lastRuneSize = s - - return r -} - -func (l *lexer) seek(w int) { - l.pos += w -} - -func (l *lexer) unread() { - if l.hasRune { - l.errorf("could not unread rune") - return - } - l.seek(-l.lastRuneSize) - l.hasRune = true -} - -func (l *lexer) errorf(f string, v ...interface{}) { - l.err = fmt.Errorf(f, v...) -} - -func (l *lexer) inTerms() bool { - return l.termsLevel > 0 -} - -func (l *lexer) termsEnter() { - l.termsLevel++ -} - -func (l *lexer) termsLeave() { - l.termsLevel-- -} - -var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} -var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) - -func (l *lexer) fetchItem() { - r := l.read() - switch { - case r == eof: - l.tokens.push(Token{EOF, ""}) - - case r == char_terms_open: - l.termsEnter() - l.tokens.push(Token{TermsOpen, string(r)}) - - case r == char_comma && l.inTerms(): - l.tokens.push(Token{Separator, string(r)}) - - case r == char_terms_close && l.inTerms(): - l.tokens.push(Token{TermsClose, string(r)}) - l.termsLeave() - - case r == char_range_open: - l.tokens.push(Token{RangeOpen, string(r)}) - l.fetchRange() - - case r == char_single: - l.tokens.push(Token{Single, string(r)}) - - case r == char_any: - if l.read() == char_any { - l.tokens.push(Token{Super, string(r) + string(r)}) - } else { - l.unread() - l.tokens.push(Token{Any, string(r)}) - } - - default: - l.unread() - - var breakers []rune - if l.inTerms() { - breakers = inTermsBreakers - } else { - breakers = inTextBreakers - } - l.fetchText(breakers) - } -} - -func (l *lexer) fetchRange() { - var wantHi bool - var wantClose bool - var seenNot bool - for { - r := l.read() - if r == eof { - l.errorf("unexpected end of input") - return - } - - if wantClose { - if r != char_range_close { - l.errorf("expected close range character") - } else { - l.tokens.push(Token{RangeClose, string(r)}) - } - return - } - - if wantHi { - l.tokens.push(Token{RangeHi, string(r)}) - wantClose = true - continue - } - - if !seenNot && r == char_range_not { - l.tokens.push(Token{Not, string(r)}) - seenNot = true - continue - } - - if n, w := l.peek(); n 
== char_range_between { - l.seek(w) - l.tokens.push(Token{RangeLo, string(r)}) - l.tokens.push(Token{RangeBetween, string(n)}) - wantHi = true - continue - } - - l.unread() // unread first peek and fetch as text - l.fetchText([]rune{char_range_close}) - wantClose = true - } -} - -func (l *lexer) fetchText(breakers []rune) { - var data []rune - var escaped bool - -reading: - for { - r := l.read() - if r == eof { - break - } - - if !escaped { - if r == char_escape { - escaped = true - continue - } - - if runes.IndexRune(breakers, r) != -1 { - l.unread() - break reading - } - } - - escaped = false - data = append(data, r) - } - - if len(data) > 0 { - l.tokens.push(Token{Text, string(data)}) - } -} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go deleted file mode 100644 index 2797c4e83..000000000 --- a/vendor/github.com/gobwas/glob/syntax/lexer/token.go +++ /dev/null @@ -1,88 +0,0 @@ -package lexer - -import "fmt" - -type TokenType int - -const ( - EOF TokenType = iota - Error - Text - Char - Any - Super - Single - Not - Separator - RangeOpen - RangeClose - RangeLo - RangeHi - RangeBetween - TermsOpen - TermsClose -) - -func (tt TokenType) String() string { - switch tt { - case EOF: - return "eof" - - case Error: - return "error" - - case Text: - return "text" - - case Char: - return "char" - - case Any: - return "any" - - case Super: - return "super" - - case Single: - return "single" - - case Not: - return "not" - - case Separator: - return "separator" - - case RangeOpen: - return "range_open" - - case RangeClose: - return "range_close" - - case RangeLo: - return "range_lo" - - case RangeHi: - return "range_hi" - - case RangeBetween: - return "range_between" - - case TermsOpen: - return "terms_open" - - case TermsClose: - return "terms_close" - - default: - return "undef" - } -} - -type Token struct { - Type TokenType - Raw string -} - -func (t Token) String() string { - return fmt.Sprintf("%v<%q>", t.Type, t.Raw) -} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go deleted file mode 100644 index 1d168b148..000000000 --- a/vendor/github.com/gobwas/glob/syntax/syntax.go +++ /dev/null @@ -1,14 +0,0 @@ -package syntax - -import ( - "github.com/gobwas/glob/syntax/ast" - "github.com/gobwas/glob/syntax/lexer" -) - -func Parse(s string) (*ast.Node, error) { - return ast.Parse(lexer.NewLexer(s)) -} - -func Special(b byte) bool { - return lexer.Special(b) -} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go deleted file mode 100644 index a72355641..000000000 --- a/vendor/github.com/gobwas/glob/util/runes/runes.go +++ /dev/null @@ -1,154 +0,0 @@ -package runes - -func Index(s, needle []rune) int { - ls, ln := len(s), len(needle) - - switch { - case ln == 0: - return 0 - case ln == 1: - return IndexRune(s, needle[0]) - case ln == ls: - if Equal(s, needle) { - return 0 - } - return -1 - case ln > ls: - return -1 - } - -head: - for i := 0; i < ls && ls-i >= ln; i++ { - for y := 0; y < ln; y++ { - if s[i+y] != needle[y] { - continue head - } - } - - return i - } - - return -1 -} - -func LastIndex(s, needle []rune) int { - ls, ln := len(s), len(needle) - - switch { - case ln == 0: - if ls == 0 { - return 0 - } - return ls - case ln == 1: - return IndexLastRune(s, needle[0]) - case ln == ls: - if Equal(s, needle) { - return 0 - } - return -1 - case ln > ls: - return -1 - } - -head: - for i := ls - 1; i >= 0 && i >= 
ln; i-- { - for y := ln - 1; y >= 0; y-- { - if s[i-(ln-y-1)] != needle[y] { - continue head - } - } - - return i - ln + 1 - } - - return -1 -} - -// IndexAny returns the index of the first instance of any Unicode code point -// from chars in s, or -1 if no Unicode code point from chars is present in s. -func IndexAny(s, chars []rune) int { - if len(chars) > 0 { - for i, c := range s { - for _, m := range chars { - if c == m { - return i - } - } - } - } - return -1 -} - -func Contains(s, needle []rune) bool { - return Index(s, needle) >= 0 -} - -func Max(s []rune) (max rune) { - for _, r := range s { - if r > max { - max = r - } - } - - return -} - -func Min(s []rune) rune { - min := rune(-1) - for _, r := range s { - if min == -1 { - min = r - continue - } - - if r < min { - min = r - } - } - - return min -} - -func IndexRune(s []rune, r rune) int { - for i, c := range s { - if c == r { - return i - } - } - return -1 -} - -func IndexLastRune(s []rune, r rune) int { - for i := len(s) - 1; i >= 0; i-- { - if s[i] == r { - return i - } - } - - return -1 -} - -func Equal(a, b []rune) bool { - if len(a) == len(b) { - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - - return true - } - - return false -} - -// HasPrefix tests whether the string s begins with prefix. -func HasPrefix(s, prefix []rune) bool { - return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) -} - -// HasSuffix tests whether the string s ends with suffix. -func HasSuffix(s, suffix []rune) bool { - return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) -} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go deleted file mode 100644 index e8ee1920b..000000000 --- a/vendor/github.com/gobwas/glob/util/strings/strings.go +++ /dev/null @@ -1,39 +0,0 @@ -package strings - -import ( - "strings" - "unicode/utf8" -) - -func IndexAnyRunes(s string, rs []rune) int { - for _, r := range rs { - if i := strings.IndexRune(s, r); i != -1 { - return i - } - } - - return -1 -} - -func LastIndexAnyRunes(s string, rs []rune) int { - for _, r := range rs { - i := -1 - if 0 <= r && r < utf8.RuneSelf { - i = strings.LastIndexByte(s, byte(r)) - } else { - sub := s - for len(sub) > 0 { - j := strings.IndexRune(s, r) - if j == -1 { - break - } - i = j - sub = sub[i+1:] - } - } - if i != -1 { - return i - } - } - return -1 -} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/consul/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index 7e64988f4..000000000 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,43 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.6.0 are supported. - -Documentation -============= - -The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) - -Usage -===== - -Below is an example of using the Consul client: - -```go -// Get a new client -client, err := api.NewClient(api.DefaultConfig()) -if err != nil { - panic(err) -} - -// Get a handle to the KV API -kv := client.KV() - -// PUT a new KV pair -p := &api.KVPair{Key: "foo", Value: []byte("test")} -_, err = kv.Put(p, nil) -if err != nil { - panic(err) -} - -// Lookup the pair -pair, _, err := kv.Get("foo", nil) -if err != nil { - panic(err) -} -fmt.Printf("KV: %v", pair) - -``` diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index c3fb0d53a..000000000 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,140 +0,0 @@ -package api - -const ( - // ACLCLientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q 
*WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index 1893d1cf3..000000000 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,471 +0,0 @@ -package api - -import ( - "bufio" - "fmt" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// AgentService represents a service known to the agent -type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string - EnableTagOverride bool -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string 
`json:",omitempty"` - EnableTagOverride bool `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to define a node or service level check -type AgentServiceCheck struct { - Script string `json:",omitempty"` - DockerContainerID string `json:",omitempty"` - Shell string `json:",omitempty"` // Only supported for Docker. - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - TCP string `json:",omitempty"` - Status string `json:",omitempty"` - Notes string `json:",omitempty"` - TLSSkipVerify bool `json:",omitempty"` - - // In Consul 0.7 and later, checks that are associated with a service - // may also contain this optional DeregisterCriticalServiceAfter field, - // which is a timeout in the same Go time format as Interval and TTL. If - // a check is in the critical state for more than this configured value, - // then its associated service (and all of its associated checks) will - // automatically be deregistered. - DeregisterCriticalServiceAfter string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Reload triggers a configuration reload for the agent we are connected to. 
-func (a *Agent) Reload() error { - r := a.c.newRequest("PUT", "/v1/agent/reload") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. -func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) PassTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) WarnTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). 
-// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) FailTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "fail") -} - -// updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior to -// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed -// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, -// but keep the old Pass/Warn/Fail methods using the old API under the hood. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 and the server endpoints will -// be removed in 0.9. -func (a *Agent) updateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// checkUpdate is the payload for a PUT for a check update. -type checkUpdate struct { - // Status is one of the api.Health* states: HealthPassing - // ("passing"), HealthWarning ("warning"), or HealthCritical - // ("critical"). - Status string - - // Output is the information to post to the UI for operators as the - // output of the process that decided to hit the TTL check. This is - // different from the note field that's associated with the check - // itself. - Output string -} - -// UpdateTTL is used to update the TTL of a check. This uses the newer API -// that was introduced in Consul 0.6.4 and later. We translate the old status -// strings for compatibility (though a newer version of Consul will still be -// required to use this API). 
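//
// For example, a TTL check could be kept in the passing state like this (the
// check ID "mem-check" is illustrative, not a value from this codebase):
//
//	agent := client.Agent()
//	if err := agent.UpdateTTL("mem-check", "memory within limits", "pass"); err != nil {
//	    log.Printf("[WARN] ttl update failed: %s", err)
//	}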
-func (a *Agent) UpdateTTL(checkID, output, status string) error { - switch status { - case "pass", HealthPassing: - status = HealthPassing - case "warn", HealthWarning: - status = HealthWarning - case "fail", HealthCritical: - status = HealthCritical - default: - return fmt.Errorf("Invalid status: %s", status) - } - - endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) - r := a.c.newRequest("PUT", endpoint) - r.obj = &checkUpdate{ - Status: status, - Output: output, - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Leave is used to have the agent gracefully leave the cluster and shutdown -func (a *Agent) Leave() error { - r := a.c.newRequest("PUT", "/v1/agent/leave") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. -func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. 
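//
// The enable/disable calls are typically paired around a disruptive operation;
// a hedged sketch (the reason string is an assumption):
//
//	agent := client.Agent()
//	_ = agent.EnableNodeMaintenance("rolling restart")
//	// ... perform the disruptive work ...
//	_ = agent.DisableNodeMaintenance()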
-func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop the -// log stream -func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) { - r := a.c.newRequest("GET", "/v1/agent/monitor") - r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - - logCh := make(chan string, 64) - go func() { - defer resp.Body.Close() - - scanner := bufio.NewScanner(resp.Body) - for { - select { - case <-stopCh: - close(logCh) - return - default: - } - if scanner.Scan() { - logCh <- scanner.Text() - } - } - }() - - return logCh, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index f6fe5a1bd..000000000 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,649 +0,0 @@ -package api - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" -) - -const ( - // HTTPAddrEnvName defines an environment variable name which sets - // the HTTP address if there is no -http-addr specified. - HTTPAddrEnvName = "CONSUL_HTTP_ADDR" - - // HTTPTokenEnvName defines an environment variable name which sets - // the HTTP token. - HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" - - // HTTPAuthEnvName defines an environment variable name which sets - // the HTTP authentication header. - HTTPAuthEnvName = "CONSUL_HTTP_AUTH" - - // HTTPSSLEnvName defines an environment variable name which sets - // whether or not to use HTTPS. - HTTPSSLEnvName = "CONSUL_HTTP_SSL" - - // HTTPSSLVerifyEnvName defines an environment variable name which sets - // whether or not to disable certificate checking. - HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // Near is used to provide a node name that will sort the results - // in ascending order based on the estimated round trip time from - // that node. Setting this to "_agent" will use the agent's node - // for the sort. - Near string - - // NodeMeta is used to filter results by nodes with the given - // metadata key/value pairs. 
Currently, only one key/value pair can
-	// be provided for filtering.
-	NodeMeta map[string]string
-
-	// RelayFactor is used in keyring operations to cause responses to be
-	// relayed back to the sender through N other random nodes. Must be
-	// a value from 0 to 5 (inclusive).
-	RelayFactor uint8
-}
-
-// WriteOptions are used to parameterize a write
-type WriteOptions struct {
-	// Providing a datacenter overwrites the DC provided
-	// by the Config
-	Datacenter string
-
-	// Token is used to provide a per-request ACL token
-	// which overrides the agent's default token.
-	Token string
-
-	// RelayFactor is used in keyring operations to cause responses to be
-	// relayed back to the sender through N other random nodes. Must be
-	// a value from 0 to 5 (inclusive).
-	RelayFactor uint8
-}
-
-// QueryMeta is used to return meta data about a query
-type QueryMeta struct {
-	// LastIndex. This can be used as a WaitIndex to perform
-	// a blocking query
-	LastIndex uint64
-
-	// Time of last contact from the leader for the
-	// server servicing the request
-	LastContact time.Duration
-
-	// Is there a known leader
-	KnownLeader bool
-
-	// How long did the request take
-	RequestTime time.Duration
-
-	// Is address translation enabled for HTTP responses on this agent
-	AddressTranslationEnabled bool
-}
-
-// WriteMeta is used to return meta data about a write
-type WriteMeta struct {
-	// How long did the request take
-	RequestTime time.Duration
-}
-
-// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
-type HttpBasicAuth struct {
-	// Username to use for HTTP Basic Authentication
-	Username string
-
-	// Password to use for HTTP Basic Authentication
-	Password string
-}
-
-// Config is used to configure the creation of a client
-type Config struct {
-	// Address is the address of the Consul server
-	Address string
-
-	// Scheme is the URI scheme for the Consul server
-	Scheme string
-
-	// Datacenter to use. If not provided, the default agent datacenter is used.
-	Datacenter string
-
-	// HttpClient is the client to use. Default will be
-	// used if not provided.
-	HttpClient *http.Client
-
-	// HttpAuth is the auth info to use for http access.
-	HttpAuth *HttpBasicAuth
-
-	// WaitTime limits how long a Watch will block. If not provided,
-	// the agent default values will be used.
-	WaitTime time.Duration
-
-	// Token is used to provide a per-request ACL token
-	// which overrides the agent's default token.
-	Token string
-}
-
-// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
-// Consul using TLS.
-type TLSConfig struct {
-	// Address is the optional address of the Consul server. The port, if any
-	// will be removed from here and this will be set to the ServerName of the
-	// resulting config.
-	Address string
-
-	// CAFile is the optional path to the CA certificate used for Consul
-	// communication, defaults to the system bundle if not specified.
-	CAFile string
-
-	// CertFile is the optional path to the certificate for Consul
-	// communication. If this is set then you need to also set KeyFile.
-	CertFile string
-
-	// KeyFile is the optional path to the private key for Consul communication.
-	// If this is set then you need to also set CertFile.
-	KeyFile string
-
-	// InsecureSkipVerify if set to true will disable TLS host verification.
-	InsecureSkipVerify bool
-}
-
-// DefaultConfig returns a default configuration for the client. By default this
-// will pool and reuse idle connections to Consul. 
If you have a long-lived
-// client object, this is the desired behavior and should make the most efficient
-// use of the connections to Consul. If you don't reuse a client object, which
-// is not recommended, then you may notice idle connections building up over
-// time. To avoid this, use the DefaultNonPooledConfig() instead.
-func DefaultConfig() *Config {
-	return defaultConfig(cleanhttp.DefaultPooledTransport)
-}
-
-// DefaultNonPooledConfig returns a default configuration for the client which
-// does not pool connections. This isn't a recommended configuration because it
-// will reconnect to Consul on every request, but this is useful to avoid the
-// accumulation of idle connections if you make many client objects during the
-// lifetime of your application.
-func DefaultNonPooledConfig() *Config {
-	return defaultConfig(cleanhttp.DefaultTransport)
-}
-
-// defaultConfig returns the default configuration for the client, using the
-// given function to make the transport.
-func defaultConfig(transportFn func() *http.Transport) *Config {
-	config := &Config{
-		Address: "127.0.0.1:8500",
-		Scheme:  "http",
-		HttpClient: &http.Client{
-			Transport: transportFn(),
-		},
-	}
-
-	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
-		config.Address = addr
-	}
-
-	if token := os.Getenv(HTTPTokenEnvName); token != "" {
-		config.Token = token
-	}
-
-	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
-		var username, password string
-		if strings.Contains(auth, ":") {
-			split := strings.SplitN(auth, ":", 2)
-			username = split[0]
-			password = split[1]
-		} else {
-			username = auth
-		}
-
-		config.HttpAuth = &HttpBasicAuth{
-			Username: username,
-			Password: password,
-		}
-	}
-
-	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
-		enabled, err := strconv.ParseBool(ssl)
-		if err != nil {
-			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
-		}
-
-		if enabled {
-			config.Scheme = "https"
-		}
-	}
-
-	if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" {
-		doVerify, err := strconv.ParseBool(verify)
-		if err != nil {
-			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
-		}
-
-		if !doVerify {
-			tlsClientConfig, err := SetupTLSConfig(&TLSConfig{
-				InsecureSkipVerify: true,
-			})
-
-			// We don't expect this to fail given that we aren't
-			// parsing any of the input, but we panic just in case
-			// since this doesn't have an error return.
-			if err != nil {
-				panic(err)
-			}
-
-			transport := transportFn()
-			transport.TLSClientConfig = tlsClientConfig
-			config.HttpClient.Transport = transport
-		}
-	}
-
-	return config
-}
-
-// SetupTLSConfig is used to generate a TLSClientConfig that's useful for talking to
-// Consul using TLS. 
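//
// Illustrative use (the address and CA path are assumptions, not values from
// this repository):
//
//	tlsCfg, err := SetupTLSConfig(&TLSConfig{
//	    Address: "consul.example.com:8501",
//	    CAFile:  "/etc/consul/ca.pem",
//	})
//	if err == nil {
//	    transport := cleanhttp.DefaultPooledTransport()
//	    transport.TLSClientConfig = tlsCfg
//	}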
-func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: tlsConfig.InsecureSkipVerify, - } - - if tlsConfig.Address != "" { - server := tlsConfig.Address - hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") - if hasPort { - var err error - server, _, err = net.SplitHostPort(server) - if err != nil { - return nil, err - } - } - tlsClientConfig.ServerName = server - } - - if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } - - if tlsConfig.CAFile != "" { - data, err := ioutil.ReadFile(tlsConfig.CAFile) - if err != nil { - return nil, fmt.Errorf("failed to read CA file: %v", err) - } - - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(data) { - return nil, fmt.Errorf("failed to parse CA certificate") - } - tlsClientConfig.RootCAs = caPool - } - - return tlsClientConfig, nil -} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.HttpClient == nil { - config.HttpClient = defConfig.HttpClient - } - - parts := strings.SplitN(config.Address, "://", 2) - if len(parts) == 2 { - switch parts[0] { - case "http": - case "https": - config.Scheme = "https" - case "unix": - trans := cleanhttp.DefaultTransport() - trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - } - config.HttpClient = &http.Client{ - Transport: trans, - } - default: - return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) - } - config.Address = parts[1] - } - - client := &Client{ - config: *config, - } - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - header http.Header - obj interface{} -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.Near != "" { - r.params.Set("near", q.Near) - } - if len(q.NodeMeta) > 0 { - for key, value := range q.NodeMeta { - r.params.Add("node-meta", key+":"+value) - } - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } -} - -// durToMsec converts a duration to a millisecond specified string. If the -// user selected a positive value that rounds to 0 ms, then we will use 1 ms -// so they get a short delay, otherwise Consul will translate the 0 ms into -// a huge default delay. 
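//
// For instance, durToMsec(10*time.Second) yields "10000ms", while a small
// positive value such as durToMsec(500*time.Microsecond) rounds up to "1ms"
// rather than "0ms" (illustrative values, derived from the rule above).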
-func durToMsec(dur time.Duration) string { - ms := dur / time.Millisecond - if dur > 0 && ms == 0 { - ms = 1 - } - return fmt.Sprintf("%dms", ms) -} - -// serverError is a string we look for to detect 500 errors. -const serverError = "Unexpected response code: 500" - -// IsServerError returns true for 500 errors from the Consul servers, these are -// usually retryable at a later time. -func IsServerError(err error) bool { - if err == nil { - return false - } - - // TODO (slackpad) - Make a real error type here instead of using - // a string check. - return strings.Contains(err.Error(), serverError) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { - return nil, err - } else { - r.body = b - } - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - req.Header = r.header - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - header: make(http.Header), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.header.Set("X-Consul-Token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Now().Sub(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. 
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - - // Parse X-Consul-Translate-Addresses - switch header.Get("X-Consul-Translate-Addresses") { - case "true": - q.AddressTranslationEnabled = true - default: - q.AddressTranslationEnabled = false - } - - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index 96226f11f..000000000 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,194 +0,0 @@ -package api - -type Node struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - Meta map[string]string -} - -type CatalogService struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServicePort int - ServiceEnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - ID string - Node string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - Datacenter string - Service *AgentService - Check *AgentCheck -} - -type CatalogDeregistration struct { - Node string - Address string // Obsolete. 
- Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := 
decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go deleted file mode 100644 index ae8d16ee6..000000000 --- a/vendor/github.com/hashicorp/consul/api/coordinate.go +++ /dev/null @@ -1,67 +0,0 @@ -package api - -import ( - "github.com/hashicorp/serf/coordinate" -) - -// CoordinateEntry represents a node and its associated network coordinate. -type CoordinateEntry struct { - Node string - Coord *coordinate.Coordinate -} - -// CoordinateDatacenterMap has the coordinates for servers in a given datacenter -// and area. Network coordinates are only compatible within the same area. -type CoordinateDatacenterMap struct { - Datacenter string - AreaID string - Coordinates []CoordinateEntry -} - -// Coordinate can be used to query the coordinate endpoints -type Coordinate struct { - c *Client -} - -// Coordinate returns a handle to the coordinate endpoints -func (c *Client) Coordinate() *Coordinate { - return &Coordinate{c} -} - -// Datacenters is used to return the coordinates of all the servers in the WAN -// pool. -func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { - r := c.c.newRequest("GET", "/v1/coordinate/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*CoordinateDatacenterMap - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to return the coordinates of all the nodes in the LAN pool. -func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/coordinate/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CoordinateEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go deleted file mode 100644 index 85b5b069b..000000000 --- a/vendor/github.com/hashicorp/consul/api/event.go +++ /dev/null @@ -1,104 +0,0 @@ -package api - -import ( - "bytes" - "strconv" -) - -// Event can be used to query the Event endpoints -type Event struct { - c *Client -} - -// UserEvent represents an event that was fired by the user -type UserEvent struct { - ID string - Name string - Payload []byte - NodeFilter string - ServiceFilter string - TagFilter string - Version int - LTime uint64 -} - -// Event returns a handle to the event endpoints -func (c *Client) Event() *Event { - return &Event{c} -} - -// Fire is used to fire a new user event. Only the Name, Payload and Filters -// are respected. This returns the ID or an associated error. Cross DC requests -// are supported. 
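//
// A hedged sketch of firing an event (the name and payload are made up):
//
//	ev := &UserEvent{Name: "deploy", Payload: []byte("web-frontend")}
//	id, _, err := client.Event().Fire(ev, nil)
//	if err == nil {
//	    log.Printf("fired event %s", id)
//	}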
-func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
-	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
-	r.setWriteOptions(q)
-	if params.NodeFilter != "" {
-		r.params.Set("node", params.NodeFilter)
-	}
-	if params.ServiceFilter != "" {
-		r.params.Set("service", params.ServiceFilter)
-	}
-	if params.TagFilter != "" {
-		r.params.Set("tag", params.TagFilter)
-	}
-	if params.Payload != nil {
-		r.body = bytes.NewReader(params.Payload)
-	}
-
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out UserEvent
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// List is used to get the most recent events an agent has received.
-// This list can be optionally filtered by the name. This endpoint supports
-// quasi-blocking queries. The index is not monotonic, nor does it provide
-// LastContact or KnownLeader.
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
-	r := e.c.newRequest("GET", "/v1/event/list")
-	r.setQueryOptions(q)
-	if name != "" {
-		r.params.Set("name", name)
-	}
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*UserEvent
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// IDToIndex is a bit of a hack. This simulates the index generation to
-// convert an event ID into a WaitIndex.
-func (e *Event) IDToIndex(uuid string) uint64 {
-	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
-	upper := uuid[19:23] + uuid[24:36]
-	lowVal, err := strconv.ParseUint(lower, 16, 64)
-	if err != nil {
-		panic("Failed to convert " + lower)
-	}
-	highVal, err := strconv.ParseUint(upper, 16, 64)
-	if err != nil {
-		panic("Failed to convert " + upper)
-	}
-	return lowVal ^ highVal
-}
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
deleted file mode 100644
index 8abe2393a..000000000
--- a/vendor/github.com/hashicorp/consul/api/health.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"strings"
-)
-
-const (
-	// HealthAny is special, and is used as a wild card,
-	// not as a specific state.
-	HealthAny      = "any"
-	HealthPassing  = "passing"
-	HealthWarning  = "warning"
-	HealthCritical = "critical"
-	HealthMaint    = "maintenance"
-)
-
-const (
-	// NodeMaint is the special key set by a node in maintenance mode.
-	NodeMaint = "_node_maintenance"
-
-	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
-	ServiceMaintPrefix = "_service_maintenance:"
-)
-
-// HealthCheck is used to represent a single check
-type HealthCheck struct {
-	Node        string
-	CheckID     string
-	Name        string
-	Status      string
-	Notes       string
-	Output      string
-	ServiceID   string
-	ServiceName string
-}
-
-// HealthChecks is a collection of HealthCheck structs.
-type HealthChecks []*HealthCheck
-
-// AggregatedStatus returns the "best" status for the list of health checks. 
-// Because a given entry may have many service and node-level health checks
-// attached, this function determines the best representative of the status as
-// a single string using the following heuristic:
-//
-//  maintenance > critical > warning > passing
-//
-func (c HealthChecks) AggregatedStatus() string {
-	var passing, warning, critical, maintenance bool
-	for _, check := range c {
-		id := string(check.CheckID)
-		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
-			maintenance = true
-			continue
-		}
-
-		switch check.Status {
-		case HealthPassing:
-			passing = true
-		case HealthWarning:
-			warning = true
-		case HealthCritical:
-			critical = true
-		default:
-			return ""
-		}
-	}
-
-	switch {
-	case maintenance:
-		return HealthMaint
-	case critical:
-		return HealthCritical
-	case warning:
-		return HealthWarning
-	case passing:
-		return HealthPassing
-	default:
-		return HealthPassing
-	}
-}
-
-// ServiceEntry is used for the health service endpoint
-type ServiceEntry struct {
-	Node    *Node
-	Service *AgentService
-	Checks  HealthChecks
-}
-
-// Health can be used to query the Health endpoints
-type Health struct {
-	c *Client
-}
-
-// Health returns a handle to the health endpoints
-func (c *Client) Health() *Health {
-	return &Health{c}
-}
-
-// Node is used to query for checks belonging to a given node
-func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/health/node/"+node)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out HealthChecks
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Checks is used to return the checks associated with a service
-func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out HealthChecks
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Service is used to query health information along with service info
-// for a given service. It can optionally do server-side filtering on a tag
-// or nodes with passing health checks only.
-func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/health/service/"+service)
-	r.setQueryOptions(q)
-	if tag != "" {
-		r.params.Set("tag", tag)
-	}
-	if passingOnly {
-		r.params.Set(HealthPassing, "1")
-	}
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*ServiceEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// State is used to retrieve all the checks in a given state.
-// The wildcard "any" state can also be used for all checks. 
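//
// Illustrative call listing every critical check in the datacenter (assumes a
// configured client):
//
//	checks, _, err := client.Health().State(HealthCritical, nil)
//	if err == nil {
//	    for _, c := range checks {
//	        log.Printf("%s on %s is critical", c.Name, c.Node)
//	    }
//	}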
-func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - switch state { - case HealthAny: - case HealthWarning: - case HealthCritical: - case HealthPassing: - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index 44e06bbb4..000000000 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,419 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - // Key is the name of the key. It is also part of the URL path when accessed - // via the API. - Key string - - // CreateIndex holds the index corresponding the creation of this KVPair. This - // is a read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // LockIndex holds the index corresponding to a lock on this key, if any. This - // is a read-only field. - LockIndex uint64 - - // Flags are any user-defined flags on the key. It is up to the implementer - // to check these values, since Consul does not treat them specially. - Flags uint64 - - // Value is the value for the key. This can be any value, but it will be - // base64 encoded upon transport. - Value []byte - - // Session is a string representing the ID of the session. Any other - // interactions with this key over the same session must specify the same - // session ID. - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KVOp constants give possible operations available in a KVTxn. -type KVOp string - -const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" -) - -// KVTxnOp defines a single operation inside a transaction. -type KVTxnOp struct { - Verb KVOp - Key string - Value []byte - Flags uint64 - Index uint64 - Session string -} - -// KVTxnOps defines a set of operations to be performed inside a single -// transaction. -type KVTxnOps []*KVTxnOp - -// KVTxnResponse has the outcome of a transaction. -type KVTxnResponse struct { - Results []*KVPair - Errors TxnErrors -} - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key. The returned pointer -// to the KVPair will be nil if the key does not exist. 
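//
// A minimal blocking-read sketch (the key name is an assumption): passing
// meta.LastIndex back as WaitIndex turns the second call into a long poll
// that returns when the key changes.
//
//	kv := client.KV()
//	pair, meta, err := kv.Get("service/config", nil)
//	if err == nil && pair != nil {
//	    pair, meta, err = kv.Get("service/config", &QueryOptions{WaitIndex: meta.LastIndex})
//	}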
-func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. -func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value is respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. 
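//
// Acquire and Release are the primitives behind the Lock helper defined in
// lock.go; a bare sketch of the round trip (the key is an assumption, and
// sessionID would come from the Session API's Create call):
//
//	p := &KVPair{Key: "locks/demo", Session: sessionID, Value: []byte("x")}
//	if ok, _, err := kv.Acquire(p, nil); err == nil && ok {
//	    defer kv.Release(p, nil)
//	}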
-func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} - -// TxnOp is the internal format we send to Consul. It's not specific to KV, -// though currently only KV operations are supported. -type TxnOp struct { - KV *KVTxnOp -} - -// TxnOps is a list of transaction operations. -type TxnOps []*TxnOp - -// TxnResult is the internal format we receive from Consul. -type TxnResult struct { - KV *KVPair -} - -// TxnResults is a list of TxnResult objects. -type TxnResults []*TxnResult - -// TxnError is used to return information about an operation in a transaction. -type TxnError struct { - OpIndex int - What string -} - -// TxnErrors is a list of TxnError objects. -type TxnErrors []*TxnError - -// TxnResponse is the internal format we receive from Consul. -type TxnResponse struct { - Results TxnResults - Errors TxnErrors -} - -// Txn is used to apply multiple KV operations in a single, atomic transaction. -// -// Note that Go will perform the required base64 encoding on the values -// automatically because the type is a byte slice. 
Transactions are defined as a
-// list of operations to perform, using the KVOp constants and KVTxnOp structure
-// to define operations. If any operation fails, none of the changes are applied
-// to the state store. Note that this hides the internal raw transaction interface
-// and munges the input and output types into KV-specific ones for ease of use.
-// If there are more non-KV operations in the future we may break out a new
-// transaction API client, but it will be easy to keep this KV-specific variant
-// supported.
-//
-// Even though this is generally a write operation, we take a QueryOptions input
-// and return a QueryMeta output. If the transaction contains only read ops, then
-// Consul will fast-path it to a different endpoint internally which supports
-// consistency controls, but not blocking. If there are write operations then
-// the request will always be routed through raft and any consistency settings
-// will be ignored.
-//
-// Here's an example:
-//
-//	ops := KVTxnOps{
-//		&KVTxnOp{
-//			Verb:    KVLock,
-//			Key:     "test/lock",
-//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
-//			Value:   []byte("hello"),
-//		},
-//		&KVTxnOp{
-//			Verb: KVGet,
-//			Key:  "another/key",
-//		},
-//	}
-//	ok, response, _, err := kv.Txn(ops, nil)
-//
-// If there is a problem making the transaction request then an error will be
-// returned. Otherwise, the ok value will be true if the transaction succeeded
-// or false if it was rolled back. The response is a structured return value which
-// will have the outcome of the transaction. Its Results member will have entries
-// for each operation. Deleted keys will have a nil entry in the results, and to
-// save space, the Value of each key in the Results will be nil unless the
-// operation is a KVGet. If the transaction was rolled back, the Errors member
-// will have entries referencing the index of the operation that failed along
-// with an error message.
-func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
-	r := k.c.newRequest("PUT", "/v1/txn")
-	r.setQueryOptions(q)
-
-	// Convert into the internal format since this is an all-KV txn.
-	ops := make(TxnOps, 0, len(txn))
-	for _, kvOp := range txn {
-		ops = append(ops, &TxnOp{KV: kvOp})
-	}
-	r.obj = ops
-	rtt, resp, err := k.c.doRequest(r)
-	if err != nil {
-		return false, nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
-		var txnResp TxnResponse
-		if err := decodeBody(resp, &txnResp); err != nil {
-			return false, nil, nil, err
-		}
-
-		// Convert from the internal format. 
-		kvResp := KVTxnResponse{
-			Errors: txnResp.Errors,
-		}
-		for _, result := range txnResp.Results {
-			kvResp.Results = append(kvResp.Results, result.KV)
-		}
-		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
-	}
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
-}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
deleted file mode 100644
index 9f9845a43..000000000
--- a/vendor/github.com/hashicorp/consul/api/lock.go
+++ /dev/null
@@ -1,384 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"sync"
-	"time"
-)
-
-const (
-	// DefaultLockSessionName is the Session Name we assign if none is provided
-	DefaultLockSessionName = "Consul API Lock"
-
-	// DefaultLockSessionTTL is the default session TTL if no Session is provided
-	// when creating a new Lock. This is used because we do not have any
-	// other check to depend upon.
-	DefaultLockSessionTTL = "15s"
-
-	// DefaultLockWaitTime is how long we block for at a time to check if lock
-	// acquisition is possible. This affects the minimum time it takes to cancel
-	// a Lock acquisition.
-	DefaultLockWaitTime = 15 * time.Second
-
-	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
-	// before attempting to do the lock again. This is so that once a lock-delay
-	// is in effect, we do not hot loop retrying the acquisition.
-	DefaultLockRetryTime = 5 * time.Second
-
-	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
-	// of a lock (500 response code). This allows the monitor to ride out brief
-	// periods of unavailability, subject to the MonitorRetries setting in the
-	// lock options which is by default set to 0, disabling this feature. This
-	// affects locks and semaphores.
-	DefaultMonitorRetryTime = 2 * time.Second
-
-	// LockFlagValue is a magic flag we set to indicate a key
-	// is being used for a lock. It is used to detect a potential
-	// conflict with a semaphore.
-	LockFlagValue = 0x2ddccbc058a50c18
-)
-
-var (
-	// ErrLockHeld is returned if we attempt to double lock
-	ErrLockHeld = fmt.Errorf("Lock already held")
-
-	// ErrLockNotHeld is returned if we attempt to unlock a lock
-	// that we do not hold.
-	ErrLockNotHeld = fmt.Errorf("Lock not held")
-
-	// ErrLockInUse is returned if we attempt to destroy a lock
-	// that is in use.
-	ErrLockInUse = fmt.Errorf("Lock in use")
-
-	// ErrLockConflict is returned if the flags on a key
-	// used for a lock do not match expectation
-	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
-)
-
-// Lock is used to implement client-side leader election. It follows the
-// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
-type Lock struct {
-	c    *Client
-	opts *LockOptions
-
-	isHeld       bool
-	sessionRenew chan struct{}
-	lockSession  string
-	l            sync.Mutex
-}
-
-// LockOptions is used to parameterize the Lock behavior. 
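//
// Illustrative configuration (the key and TTL values are assumptions):
//
//	l, err := client.LockOpts(&LockOptions{
//	    Key:        "locks/leader",
//	    SessionTTL: "10s",
//	})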
-type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionOpts *SessionEntry // Optional, options to use when creating a session - SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) - SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime - LockTryOnce bool // Optional, defaults to false which means try forever -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.LockWaitTime == 0 { - opts.LockWaitTime = DefaultLockWaitTime - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. -func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - if s, err := l.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: l.opts.LockWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Now().Sub(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - //If the session is not null, this means that a wait can safely happen - //using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. -func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. 
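
For reference, a minimal leader-election sketch against the Lock API removed here; it assumes a reachable local Consul agent, and the key name and the 10-second work window are illustrative, not part of this patch:

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// The key must be writable by the client's ACL token.
	lock, err := client.LockKey("service/myapp/leader")
	if err != nil {
		log.Fatal(err)
	}
	// Blocks until acquired; a non-nil stopCh would allow aborting.
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("became leader")
	select {
	case <-leaderCh:
		// Closed on session invalidation, communication errors, etc.
		log.Println("leadership lost")
	case <-time.After(10 * time.Second):
		// Done with leader-only work; give the lock up voluntarily.
		if err := lock.Unlock(); err != nil {
			log.Println("unlock:", err)
		}
	}
}
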
-func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := l.opts.SessionOpts - if se == nil { - se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := l.opts.MonitorRetries -RETRY: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsServerError(err) { - time.Sleep(l.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go deleted file mode 100644 index 079e22486..000000000 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Operator can be used to perform low-level operator tasks for Consul. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. -func (c *Client) Operator() *Operator { - return &Operator{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go deleted file mode 100644 index 7b0e461e9..000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ /dev/null @@ -1,168 +0,0 @@ -// The /v1/operator/area endpoints are available only in Consul Enterprise and -// interact with its network area subsystem. Network areas are used to link -// together Consul servers in different Consul datacenters. With network areas, -// Consul datacenters can be linked together in ways other than a fully-connected -// mesh, as is required for Consul's WAN. -package api - -import ( - "net" - "time" -) - -// Area defines a network area. 
-type Area struct { - // ID is this identifier for an area (a UUID). This must be left empty - // when creating a new area. - ID string - - // PeerDatacenter is the peer Consul datacenter that will make up the - // other side of this network area. Network areas always involve a pair - // of datacenters: the datacenter where the area was created, and the - // peer datacenter. This is required. - PeerDatacenter string - - // RetryJoin specifies the address of Consul servers to join to, such as - // an IPs or hostnames with an optional port number. This is optional. - RetryJoin []string -} - -// AreaJoinResponse is returned when a join occurs and gives the result for each -// address. -type AreaJoinResponse struct { - // The address that was joined. - Address string - - // Whether or not the join was a success. - Joined bool - - // If we couldn't join, this is the message with information. - Error string -} - -// SerfMember is a generic structure for reporting information about members in -// a Serf cluster. This is only used by the area endpoints right now, but this -// could be expanded to other endpoints in the future. -type SerfMember struct { - // ID is the node identifier (a UUID). - ID string - - // Name is the node name. - Name string - - // Addr has the IP address. - Addr net.IP - - // Port is the RPC port. - Port uint16 - - // Datacenter is the DC name. - Datacenter string - - // Role is "client", "server", or "unknown". - Role string - - // Build has the version of the Consul agent. - Build string - - // Protocol is the protocol of the Consul agent. - Protocol int - - // Status is the Serf health status "none", "alive", "leaving", "left", - // or "failed". - Status string - - // RTT is the estimated round trip time from the server handling the - // request to the this member. This will be negative if no RTT estimate - // is available. - RTT time.Duration -} - -// AreaCreate will create a new network area. The ID in the given structure must -// be empty and a generated ID will be returned on success. -func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("POST", "/v1/operator/area") - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaGet returns a single network area. -func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaList returns all the available network areas. -func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaDelete deletes the given network area. 
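
The area endpoints are Consul Enterprise only; as a rough sketch of creating and joining an area, reusing a client built as in the lock example earlier (the peer datacenter name and server address are placeholders):

func areaSketch(client *api.Client) error {
	op := client.Operator()
	// ID must be left empty; Consul generates and returns it.
	id, _, err := op.AreaCreate(&api.Area{PeerDatacenter: "dc2"}, nil)
	if err != nil {
		return err
	}
	resps, _, err := op.AreaJoin(id, []string{"10.0.0.10"}, nil)
	if err != nil {
		return err
	}
	for _, r := range resps {
		log.Printf("join %s: joined=%v err=%q", r.Address, r.Joined, r.Error)
	}
	return nil
}
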
-func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { - r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// AreaJoin attempts to join the given set of join addresses to the given -// network area. See the Area structure for details about join addresses. -func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") - r.setWriteOptions(q) - r.obj = addresses - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out []*AreaJoinResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, wm, nil -} - -// AreaMembers lists the Serf information about the members in the given area. -func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { - var out []*SerfMember - qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go deleted file mode 100644 index 59471ecf9..000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ /dev/null @@ -1,215 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. -// Autopilot helps manage operator tasks related to Consul servers like removing -// failed servers from the Raft quorum. -type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. - LastContactThreshold *ReadableDuration - - // MaxTrailingLogs is the amount of entries in the Raft Log that a server can - // be behind before being considered unhealthy. - MaxTrailingLogs uint64 - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime *ReadableDuration - - // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating - // servers into zones for redundancy. If left blank, this feature will be disabled. - RedundancyZoneTag string - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // CreateIndex holds the index corresponding the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. 
Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Consul version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. - LastContact *ReadableDuration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. - StableSince time.Time -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -// ReadableDuration is a duration type that is serialized to JSON in human readable format. -type ReadableDuration time.Duration - -func NewReadableDuration(dur time.Duration) *ReadableDuration { - d := ReadableDuration(dur) - return &d -} - -func (d *ReadableDuration) String() string { - return d.Duration().String() -} - -func (d *ReadableDuration) Duration() time.Duration { - if d == nil { - return time.Duration(0) - } - return time.Duration(*d) -} - -func (d *ReadableDuration) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil -} - -func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { - if d == nil { - return fmt.Errorf("cannot unmarshal to nil pointer") - } - - str := string(raw) - if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { - return fmt.Errorf("must be enclosed with quotes: %s", str) - } - dur, err := time.ParseDuration(str[1 : len(str)-1]) - if err != nil { - return err - } - *d = ReadableDuration(dur) - return nil -} - -// AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out AutopilotConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. 
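
The ModifyIndex comment earlier is the heart of the check-and-set flow: read the configuration, mutate it, and write it back with AutopilotCASConfiguration, which reports false if someone updated it in between. A sketch under the same client assumption as before:

func autopilotCASSketch(client *api.Client) error {
	op := client.Operator()
	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		return err
	}
	conf.CleanupDeadServers = false
	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		return err
	}
	if !ok {
		// ModifyIndex changed underneath us; re-read and retry.
		log.Println("autopilot CAS lost the race")
	}
	return nil
}
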
-func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. -func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - - return res, nil -} - -// AutopilotServerHealth -func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/health") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out OperatorHealthReply - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go deleted file mode 100644 index 4f91c3543..000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ /dev/null @@ -1,83 +0,0 @@ -package api - -// keyringRequest is used for performing Keyring operations -type keyringRequest struct { - Key string -} - -// KeyringResponse is returned when listing the gossip encryption keys -type KeyringResponse struct { - // Whether this response is for a WAN ring - WAN bool - - // The datacenter name this request corresponds to - Datacenter string - - // A map of the encryption keys to the number of nodes they're installed on - Keys map[string]int - - // The total number of nodes in this ring - NumNodes int -} - -// KeyringInstall is used to install a new gossip encryption key into the cluster -func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { - r := op.c.newRequest("POST", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringList is used to list the gossip keys installed in the cluster -func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { - r := op.c.newRequest("GET", "/v1/operator/keyring") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*KeyringResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// KeyringRemove is used to remove a gossip encryption key from the cluster -func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := 
requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringUse is used to change the active gossip encryption key -func (op *Operator) KeyringUse(key string, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go deleted file mode 100644 index 5f3c25b13..000000000 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Consul. - ID string - - // Node is the node name of the server, as known by Consul, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address string - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Consul. - Voter bool -} - -// RaftConfigration is returned when querying for the current Raft configuration. -type RaftConfiguration struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// RaftGetConfiguration is used to query the current Raft peer set. -func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/raft/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out RaftConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by address in the form of -// "IP:port". -func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("address", string(address)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// RaftRemovePeerByID is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by ID. 
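
As a sketch of the stale-peer inspection and removal described here (the peer address is a placeholder; same client assumption as earlier):

func raftPeerSketch(client *api.Client) error {
	op := client.Operator()
	conf, err := op.RaftGetConfiguration(nil)
	if err != nil {
		return err
	}
	for _, s := range conf.Servers {
		log.Printf("raft peer %s at %s leader=%v voter=%v",
			s.Node, s.Address, s.Leader, s.Voter)
	}
	// Kick a peer that is still in the quorum but gone from Serf/catalog.
	return op.RaftRemovePeerByAddress("10.0.0.99:8300", nil)
}
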
-func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("id", string(id)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go deleted file mode 100644 index ff210de3f..000000000 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ /dev/null @@ -1,198 +0,0 @@ -package api - -// QueryDatacenterOptions sets options about how we fail over if there are no -// healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { - // NearestN is set to the number of remote datacenters to try, based on - // network coordinates. - NearestN int - - // Datacenters is a fixed list of datacenters to try after NearestN. We - // never try a datacenter multiple times, so those are subtracted from - // this list before proceeding. - Datacenters []string -} - -// QueryDNSOptions controls settings when query results are served over DNS. -type QueryDNSOptions struct { - // TTL is the time to live for the served DNS results. - TTL string -} - -// ServiceQuery is used to query for a set of healthy nodes offering a specific -// service. -type ServiceQuery struct { - // Service is the service to query. - Service string - - // Near allows baking in the name of a node to automatically distance- - // sort from. The magic "_agent" value is supported, which sorts near - // the agent which initiated the request by default. - Near string - - // Failover controls what we do if there are no healthy nodes in the - // local datacenter. - Failover QueryDatacenterOptions - - // If OnlyPassing is true then we will only include nodes with passing - // health checks (critical AND warning checks will cause a node to be - // discarded) - OnlyPassing bool - - // Tags are a set of required and/or disallowed tags. If a tag is in - // this list it must be present. If the tag is preceded with "!" then - // it is disallowed. - Tags []string - - // NodeMeta is a map of required node metadata fields. If a key/value - // pair is in this map it must be present on the node in order for the - // service entry to be returned. - NodeMeta map[string]string -} - -// QueryTemplate carries the arguments for creating a templated query. -type QueryTemplate struct { - // Type specifies the type of the query template. Currently only - // "name_prefix_match" is supported. This field is required. - Type string - - // Regexp allows specifying a regex pattern to match against the name - // of the query being executed. - Regexp string -} - -// PrepatedQueryDefinition defines a complete prepared query. -type PreparedQueryDefinition struct { - // ID is this UUID-based ID for the query, always generated by Consul. - ID string - - // Name is an optional friendly name for the query supplied by the - // user. NOTE - if this feature is used then it will reduce the security - // of any read ACL associated with this query/service since this name - // can be used to locate nodes with supplying any ACL. - Name string - - // Session is an optional session to tie this query's lifetime to. If - // this is omitted then the query will not expire. - Session string - - // Token is the ACL token used when the query was created, and it is - // used when a query is subsequently executed. 
This token, or a token - // with management privileges, must be used to change the query later. - Token string - - // Service defines a service query (leaving things open for other types - // later). - Service ServiceQuery - - // DNS has options that control how the results of this query are - // served over DNS. - DNS QueryDNSOptions - - // Template is used to pass through the arguments for creating a - // prepared query with an attached template. If a template is given, - // interpolations are possible in other struct fields. - Template QueryTemplate -} - -// PreparedQueryExecuteResponse has the results of executing a query. -type PreparedQueryExecuteResponse struct { - // Service is the service that was queried. - Service string - - // Nodes has the nodes that were output by the query. - Nodes []ServiceEntry - - // DNS has the options for serving these results over DNS. - DNS QueryDNSOptions - - // Datacenter is the datacenter that these results came from. - Datacenter string - - // Failovers is a count of how many times we had to query a remote - // datacenter. - Failovers int -} - -// PreparedQuery can be used to query the prepared query endpoints. -type PreparedQuery struct { - c *Client -} - -// PreparedQuery returns a handle to the prepared query endpoints. -func (c *Client) PreparedQuery() *PreparedQuery { - return &PreparedQuery{c} -} - -// Create makes a new prepared query. The ID of the new query is returned. -func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/query") - r.setWriteOptions(q) - r.obj = query - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update makes updates to an existing prepared query. -func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { - return c.c.write("/v1/query/"+query.ID, query, nil, q) -} - -// List is used to fetch all the prepared queries (always requires a management -// token). -func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Get is used to fetch a specific prepared query. -func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query/"+queryID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// Execute is used to execute a specific prepared query. You can execute using -// a query ID or name. 
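
A create-then-execute sketch for the prepared-query API; the service name "web", the query name, and the failover settings are illustrative, and the client assumption is the same as earlier:

func preparedQuerySketch(client *api.Client) error {
	pq := client.PreparedQuery()
	id, _, err := pq.Create(&api.PreparedQueryDefinition{
		Name: "nearest-web",
		Service: api.ServiceQuery{
			Service:     "web",
			OnlyPassing: true,
			Failover:    api.QueryDatacenterOptions{NearestN: 2},
		},
	}, nil)
	if err != nil {
		return err
	}
	// Execute accepts either the generated ID or the friendly name.
	res, _, err := pq.Execute(id, nil)
	if err != nil {
		return err
	}
	log.Printf("%d healthy nodes from datacenter %s", len(res.Nodes), res.Datacenter)
	return nil
}
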
-func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { - var out *PreparedQueryExecuteResponse - qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 745a208c9..000000000 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index e6645ac1d..000000000 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,512 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. This is used because we do not have another - // other check to depend upon. - DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. - DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. - SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. - ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. 
-type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime - SemaphoreTryOnce bool // Optional, defaults to false which means try forever -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is used as a set effectively. - Holders map[string]bool -} - -// SemaphorePrefix is used to created a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. -func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -} - -// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. -func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.SemaphoreWaitTime == 0 { - opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interrupted via the stopCh or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. 
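
A sketch of holding one of N slots with the Semaphore documented above; the prefix, limit, and simulated work window are illustrative, under the same client assumption as earlier:

func semaphoreSketch(client *api.Client) error {
	// At most 3 contenders may hold the semaphore at once.
	sem, err := client.SemaphorePrefix("service/workers/sem", 3)
	if err != nil {
		return err
	}
	slotCh, err := sem.Acquire(nil)
	if err != nil {
		return err
	}
	defer sem.Release()
	select {
	case <-slotCh:
		// Slot lost (session invalidated, etc.); stop the guarded work.
		return nil
	case <-time.After(5 * time.Second):
		// Simulated work done while holding the slot.
		return nil
	}
}
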
-func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - if sess, err := s.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. - if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Now().Sub(start) - if elapsed > qOpts.WaitTime { - return nil, nil - } - - qOpts.WaitTime -= elapsed - } - attempts++ - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. 
-func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. -func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an 
-// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. -func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := s.opts.MonitorRetries -RETRY: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsServerError(err) { - time.Sleep(s.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 36e99a389..000000000 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,217 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
- SessionBehaviorDelete = "delete" -) - -var ErrSessionExpired = errors.New("session expired") - -// SessionEntry represents a session in consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. -func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - r := s.c.newRequest("PUT", "/v1/session/renew/"+id) - r.setWriteOptions(q) - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - - if resp.StatusCode == 404 { - return nil, wm, nil - } else if resp.StatusCode != 200 { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - - var entries []*SessionEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. 
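
The RenewPeriodic helper pairs naturally with a TTL session; a sketch, with the session name and TTL illustrative and the same client assumption as earlier:

func sessionSketch(client *api.Client) error {
	sess := client.Session()
	id, _, err := sess.Create(&api.SessionEntry{
		Name:     "example-session",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		return err
	}
	doneCh := make(chan struct{})
	// Renews in the background at TTL/2; on close(doneCh) it attempts
	// to destroy the session before returning.
	go sess.RenewPeriodic("15s", id, nil, doneCh)
	// ... tie locks or ephemeral keys to the session here ...
	close(doneCh)
	return nil
}
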
-func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error { - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - return ErrSessionExpired - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go deleted file mode 100644 index e902377dd..000000000 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ /dev/null @@ -1,47 +0,0 @@ -package api - -import ( - "io" -) - -// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of -// Consul's internal state and restore snapshots for disaster recovery. -type Snapshot struct { - c *Client -} - -// Snapshot returns a handle that exposes the snapshot endpoints. -func (c *Client) Snapshot() *Snapshot { - return &Snapshot{c} -} - -// Save requests a new snapshot and provides an io.ReadCloser with the snapshot -// data to save. If this doesn't return an error, then it's the responsibility -// of the caller to close it. Only a subset of the QueryOptions are supported: -// Datacenter, AllowStale, and Token. -func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { - r := s.c.newRequest("GET", "/v1/snapshot") - r.setQueryOptions(q) - - rtt, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return resp.Body, qm, nil -} - -// Restore streams in an existing snapshot and attempts to restore it. 
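
Save and Restore combine into a simple backup/restore round trip; a sketch writing to a local file (file name illustrative; needs the io and os imports in addition to the earlier ones; same client assumption):

func snapshotSketch(client *api.Client) error {
	snap := client.Snapshot()
	rc, _, err := snap.Save(nil)
	if err != nil {
		return err
	}
	defer rc.Close()
	f, err := os.Create("consul-backup.snap")
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, rc); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Restoring streams the saved snapshot back in.
	in, err := os.Open("consul-backup.snap")
	if err != nil {
		return err
	}
	defer in.Close()
	return snap.Restore(nil, in)
}
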
-func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { - r := s.c.newRequest("PUT", "/v1/snapshot") - r.body = in - r.setWriteOptions(q) - _, _, err := requireOK(s.c.doRequest(r)) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a67..000000000 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for a known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/errwrap/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
deleted file mode 100644
index 1c95f5978..000000000
--- a/vendor/github.com/hashicorp/errwrap/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# errwrap
-
-`errwrap` is a package for Go that formalizes the pattern of wrapping errors
-and checking if an error contains another error.
-
-There is a common pattern in Go of taking a returned `error` value and
-then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
-with this pattern is that you completely lose the original `error` structure.
-
-Arguably the _correct_ approach is that you should make a custom structure
-implementing the `error` interface, and have the original error as a field
-on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
-This is a good approach, but you have to know the entire chain of possible
-rewrapping that happens, when you might just care about one.
-
-`errwrap` formalizes this pattern (it doesn't matter what approach you use
-above) by giving a single interface for wrapping errors, checking if a specific
-error is wrapped, and extracting that error.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/errwrap`.
-
-Full documentation is available at
-http://godoc.org/github.com/hashicorp/errwrap
-
-## Usage
-
-#### Basic Usage
-
-Below is a very basic example of its usage:
-
-```go
-// A function that always returns an error, but wraps it, like a real
-// function might.
-func tryOpen() error {
-	_, err := os.Open("/i/dont/exist")
-	if err != nil {
-		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
-	}
-
-	return nil
-}
-
-func main() {
-	err := tryOpen()
-
-	// We can use the Contains helpers to check if an error contains
-	// another error. It is safe to do this with a nil error, or with
-	// an error that doesn't even use the errwrap package.
-	if errwrap.Contains(err, "doesn't exist") {
-		// Do something
-	}
-	if errwrap.ContainsType(err, new(os.PathError)) {
-		// Do something
-	}
-
-	// Or we can use the associated `Get` functions to just extract
-	// a specific error. This would return nil if that specific error doesn't
-	// exist.
-	perr := errwrap.GetType(err, new(os.PathError))
-}
-```
-
-#### Custom Types
-
-If you're already making custom types that properly wrap errors, then
-you can get all the functionality of `errwrap.Contains` and such by
-implementing the `Wrapper` interface with just one function. Example:
-
-```go
-type AppError struct {
-	Code ErrorCode
-	Err  error
-}
-
-func (e *AppError) WrappedErrors() []error {
-	return []error{e.Err}
-}
-```
-
-Now this works:
-
-```go
-err := &AppError{Err: fmt.Errorf("an error")}
-if errwrap.ContainsType(err, fmt.Errorf("")) {
-	// This will work!
-}
-```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
deleted file mode 100644
index a733bef18..000000000
--- a/vendor/github.com/hashicorp/errwrap/errwrap.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Package errwrap implements methods to formalize error wrapping in Go.
-//
-// All of the top-level functions that take an `error` are built to be able
-// to take any error, not just wrapped errors.
This allows you to use errwrap -// without having to type-check and type-cast everywhere. -package errwrap - -import ( - "errors" - "reflect" - "strings" -) - -// WalkFunc is the callback called for Walk. -type WalkFunc func(error) - -// Wrapper is an interface that can be implemented by custom types to -// have all the Contains, Get, etc. functions in errwrap work. -// -// When Walk reaches a Wrapper, it will call the callback for every -// wrapped error in addition to the wrapper itself. Since all the top-level -// functions in errwrap use Walk, this means that all those functions work -// with your custom type. -type Wrapper interface { - WrappedErrors() []error -} - -// Wrap defines that outer wraps inner, returning an error type that -// can be cleanly used with the other methods in this package, such as -// Contains, GetAll, etc. -// -// This function won't modify the error message at all (the outer message -// will be used). -func Wrap(outer, inner error) error { - return &wrappedError{ - Outer: outer, - Inner: inner, - } -} - -// Wrapf wraps an error with a formatting message. This is similar to using -// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap -// errors, you should replace it with this. -// -// format is the format of the error message. The string '{{err}}' will -// be replaced with the original error message. -func Wrapf(format string, err error) error { - outerMsg := "" - if err != nil { - outerMsg = err.Error() - } - - outer := errors.New(strings.Replace( - format, "{{err}}", outerMsg, -1)) - - return Wrap(outer, err) -} - -// Contains checks if the given error contains an error with the -// message msg. If err is not a wrapped error, this will always return -// false unless the error itself happens to match this msg. -func Contains(err error, msg string) bool { - return len(GetAll(err, msg)) > 0 -} - -// ContainsType checks if the given error contains an error with -// the same concrete type as v. If err is not a wrapped error, this will -// check the err itself. -func ContainsType(err error, v interface{}) bool { - return len(GetAllType(err, v)) > 0 -} - -// Get is the same as GetAll but returns the deepest matching error. -func Get(err error, msg string) error { - es := GetAll(err, msg) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetType is the same as GetAllType but returns the deepest matching error. -func GetType(err error, v interface{}) error { - es := GetAllType(err, v) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetAll gets all the errors that might be wrapped in err with the -// given message. The order of the errors is such that the outermost -// matching error (the most recent wrap) is index zero, and so on. -func GetAll(err error, msg string) []error { - var result []error - - Walk(err, func(err error) { - if err.Error() == msg { - result = append(result, err) - } - }) - - return result -} - -// GetAllType gets all the errors that are the same type as v. -// -// The order of the return value is the same as described in GetAll. -func GetAllType(err error, v interface{}) []error { - var result []error - - var search string - if v != nil { - search = reflect.TypeOf(v).String() - } - Walk(err, func(err error) { - var needle string - if err != nil { - needle = reflect.TypeOf(err).String() - } - - if needle == search { - result = append(result, err) - } - }) - - return result -} - -// Walk walks all the wrapped errors in err and calls the callback. 
If -// err isn't a wrapped error, this will be called once for err. If err -// is a wrapped error, the callback will be called for both the wrapper -// that implements error as well as the wrapped error itself. -func Walk(err error, cb WalkFunc) { - if err == nil { - return - } - - switch e := err.(type) { - case *wrappedError: - cb(e.Outer) - Walk(e.Inner, cb) - case Wrapper: - cb(err) - - for _, err := range e.WrappedErrors() { - Walk(err, cb) - } - default: - cb(err) - } -} - -// wrappedError is an implementation of error that has both the -// outer and inner errors. -type wrappedError struct { - Outer error - Inner error -} - -func (w *wrappedError) Error() string { - return w.Outer.Error() -} - -func (w *wrappedError) WrappedErrors() []error { - return []error{w.Outer, w.Inner} -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e4..000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
"Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. 
You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. 
The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313f..000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. 
It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index f4596d80c..000000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,53 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "time" -) - -// DefaultTransport returns a new http.Transport with the same default values -// as http.DefaultTransport, but with idle connections and keepalives disabled. -func DefaultTransport() *http.Transport { - transport := DefaultPooledTransport() - transport.DisableKeepAlives = true - transport.MaxIdleConnsPerHost = -1 - return transport -} - -// DefaultPooledTransport returns a new http.Transport with similar default -// values to http.DefaultTransport. Do not use this for transient transports as -// it can leak file descriptors over time. Only use this for transports that -// will be re-used for the same host(s). -func DefaultPooledTransport() *http.Transport { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - DisableKeepAlives: false, - MaxIdleConnsPerHost: 1, - } - return transport -} - -// DefaultClient returns a new http.Client with similar default values to -// http.Client, but with a non-shared Transport, idle connections disabled, and -// keepalives disabled. -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} - -// DefaultPooledClient returns a new http.Client with the same default values -// as http.Client, but with a shared Transport. Do not use this function -// for transient clients as it can leak file descriptors over time. Only use -// this for clients that will be re-used for the same host(s). 
-func DefaultPooledClient() *http.Client {
-	return &http.Client{
-		Transport: DefaultPooledTransport(),
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
deleted file mode 100644
index 05841092a..000000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package cleanhttp offers convenience utilities for acquiring "clean"
-// http.Transport and http.Client structs.
-//
-// Values set on http.DefaultClient and http.DefaultTransport affect all
-// callers. This can have detrimental effects, especially in TLS contexts,
-// where client or root certificates set to talk to multiple endpoints can end
-// up displacing each other, leading to hard-to-debug issues. This package
-// provides non-shared http.Client and http.Transport structs to ensure that
-// the configuration will not be overwritten by other parts of the application
-// or dependencies.
-//
-// The DefaultClient and DefaultTransport functions disable idle connections
-// and keepalives. Without ensuring that idle connections are closed before
-// garbage collection, short-term clients/transports can leak file descriptors,
-// eventually leading to "too many open files" errors. If you will be
-// connecting to the same hosts repeatedly from the same client, you can use
-// DefaultPooledClient to receive a client that has connection pooling
-// semantics similar to http.DefaultClient.
-//
-package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
deleted file mode 100644
index 82b4de97c..000000000
--- a/vendor/github.com/hashicorp/go-multierror/LICENSE
+++ /dev/null
@@ -1,353 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
-     means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. “Incompatible With Secondary Licenses”
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of version
-        1.1 or earlier of the License, but not also under the terms of a
-        Secondary License.
-
-1.6. “Executable Form”
-
-     means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
-     means a work that combines Covered Software with other material, in a separate
-     file or files, that is not Covered Software.
-
-1.8. “License”
-
-     means this document.
-
-1.9. “Licensable”
-
-     means having the right to grant, to the maximum extent possible, whether at the
-     time of the initial grant or subsequently, any and all of the rights conveyed by
-     this License.
-
-1.10. “Modifications”
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to, deletion
-        from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
- -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. 
- -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md deleted file mode 100644 index e81be50e0..000000000 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# go-multierror - -`go-multierror` is a package for Go that provides a mechanism for -representing a list of `error` values as a single `error`. - -This allows a function in Go to return an `error` that might actually -be a list of errors. If the caller knows this, they can unwrap the -list and access the errors. If the caller doesn't know, the error -formats to a nice human-readable format. - -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. - -## Installation and Docs - -Install using `go get github.com/hashicorp/go-multierror`. - -Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror - -## Usage - -go-multierror is easy to use and purposely built to be unobtrusive in -existing Go applications/libraries that may not be aware of it. - -**Building a list of errors** - -The `Append` function is used to create a list of errors. This function -behaves a lot like the Go built-in `append` function: it doesn't matter -if the first argument is nil, a `multierror.Error`, or any other `error`, -the function behaves as you would expect. - -```go -var result error - -if err := step1(); err != nil { - result = multierror.Append(result, err) -} -if err := step2(); err != nil { - result = multierror.Append(result, err) -} - -return result -``` - -**Customizing the formatting of the errors** - -By specifying a custom `ErrorFormat`, you can customize the format -of the `Error() string` function: - -```go -var result *multierror.Error - -// ... accumulate errors here, maybe using Append - -if result != nil { - result.ErrorFormat = func([]error) string { - return "errors!" - } -} -``` - -**Accessing the list of errors** - -`multierror.Error` implements `error` so if the caller doesn't know about -multierror, it will work just fine. But if you're aware a multierror might -be returned, you can use type switches to access the list of errors: - -```go -if err := something(); err != nil { - if merr, ok := err.(*multierror.Error); ok { - // Use merr.Errors - } -} -``` - -**Returning a multierror only if there are errors** - -If you build a `multierror.Error`, you can use the `ErrorOrNil` function -to return an `error` implementation only if there are errors to return: - -```go -var result *multierror.Error - -// ... accumulate errors here - -// Return the `error` only if errors were added to the multierror, otherwise -// return nil since there are no errors. -return result.ErrorOrNil() -``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go deleted file mode 100644 index 00afa9b35..000000000 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -// Append is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. 
If any of the errs are multierror.Error, they will be flattened
-// one level into err.
-func Append(err error, errs ...error) *Error {
-	switch err := err.(type) {
-	case *Error:
-		// Typed nils can reach here, so initialize if we are nil
-		if err == nil {
-			err = new(Error)
-		}
-
-		// Go through each error and flatten
-		for _, e := range errs {
-			switch e := e.(type) {
-			case *Error:
-				err.Errors = append(err.Errors, e.Errors...)
-			default:
-				err.Errors = append(err.Errors, e)
-			}
-		}
-
-		return err
-	default:
-		newErrs := make([]error, 0, len(errs)+1)
-		if err != nil {
-			newErrs = append(newErrs, err)
-		}
-		newErrs = append(newErrs, errs...)
-
-		return Append(&Error{}, newErrs...)
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
deleted file mode 100644
index aab8e9abe..000000000
--- a/vendor/github.com/hashicorp/go-multierror/flatten.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package multierror
-
-// Flatten flattens the given error, merging any *Errors together into
-// a single *Error.
-func Flatten(err error) error {
-	// If it isn't an *Error, just return the error as-is
-	if _, ok := err.(*Error); !ok {
-		return err
-	}
-
-	// Otherwise, make the result and flatten away!
-	flatErr := new(Error)
-	flatten(err, flatErr)
-	return flatErr
-}
-
-func flatten(err error, flatErr *Error) {
-	switch err := err.(type) {
-	case *Error:
-		for _, e := range err.Errors {
-			flatten(e, flatErr)
-		}
-	default:
-		flatErr.Errors = append(flatErr.Errors, err)
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
deleted file mode 100644
index bb65a12e7..000000000
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package multierror
-
-import (
-	"fmt"
-	"strings"
-)
-
-// ErrorFormatFunc is a function callback that is called by Error to
-// turn the list of errors into a string.
-type ErrorFormatFunc func([]error) string
-
-// ListFormatFunc is a basic formatter that outputs the number of errors
-// that occurred along with a bullet point list of the errors.
-func ListFormatFunc(es []error) string {
-	points := make([]string, len(es))
-	for i, err := range es {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	return fmt.Sprintf(
-		"%d error(s) occurred:\n\n%s",
-		len(es), strings.Join(points, "\n"))
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
deleted file mode 100644
index 2ea082732..000000000
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package multierror
-
-import (
-	"fmt"
-)
-
-// Error is an error type to track multiple errors. This is used to
-// accumulate errors in cases and return them as a single "error".
-type Error struct {
-	Errors      []error
-	ErrorFormat ErrorFormatFunc
-}
-
-func (e *Error) Error() string {
-	fn := e.ErrorFormat
-	if fn == nil {
-		fn = ListFormatFunc
-	}
-
-	return fn(e.Errors)
-}
-
-// ErrorOrNil returns an error interface if this Error represents
-// a list of errors, or returns nil if the list of errors is empty. This
-// function is useful at the end of accumulation to make sure that the value
-// returned represents the existence of errors. 
-func (e *Error) ErrorOrNil() error {
-	if e == nil {
-		return nil
-	}
-	if len(e.Errors) == 0 {
-		return nil
-	}
-
-	return e
-}
-
-func (e *Error) GoString() string {
-	return fmt.Sprintf("*%#v", *e)
-}
-
-// WrappedErrors returns the list of errors that this Error is wrapping.
-// It is an implementation of the errwrap.Wrapper interface so that
-// multierror.Error can be used with that library.
-//
-// This method is not safe to be called concurrently and is no different
-// than accessing the Errors field directly. It is implemented only to
-// satisfy the errwrap.Wrapper interface.
-func (e *Error) WrappedErrors() []error {
-	return e.Errors
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
deleted file mode 100644
index 5c477abe4..000000000
--- a/vendor/github.com/hashicorp/go-multierror/prefix.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package multierror
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/errwrap"
-)
-
-// Prefix is a helper function that will prefix some text
-// to the given error. If the error is a multierror.Error, then
-// it will be prefixed to each wrapped error.
-//
-// This is useful to use when appending multiple multierrors
-// together in order to give better scoping.
-func Prefix(err error, prefix string) error {
-	if err == nil {
-		return nil
-	}
-
-	format := fmt.Sprintf("%s {{err}}", prefix)
-	switch err := err.(type) {
-	case *Error:
-		// Typed nils can reach here, so initialize if we are nil
-		if err == nil {
-			err = new(Error)
-		}
-
-		// Wrap each of the errors
-		for i, e := range err.Errors {
-			err.Errors[i] = errwrap.Wrapf(format, e)
-		}
-
-		return err
-	default:
-		return errwrap.Wrapf(format, err)
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
deleted file mode 100644
index e87a115e4..000000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
-   means each individual or legal entity that creates, contributes to the
-   creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
-   means the combination of the Contributions of others (if any) used by a
-   Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
-   means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
-   means Source Code Form to which the initial Contributor has attached the
-   notice in Exhibit A, the Executable Form of such Source Code Form, and
-   Modifications of such Source Code Form, in each case including portions
-   thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-   means
-
-   a. that the initial Contributor has attached the notice described in
-      Exhibit B to the Covered Software; or
-
-   b. that the Covered Software was made available under the terms of
-      version 1.1 or earlier of the License, but not also under the terms of
-      a Secondary License.
-
-1.6. "Executable Form"
-
-   means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
-   means a work that combines Covered Software with other material, in a
-   separate file or files, that is not Covered Software.
-
-1.8. "License"
-
-   means this document.
-
-1.9. "Licensable"
-
-   means having the right to grant, to the maximum extent possible, whether
-   at the time of the initial grant or subsequently, any and all of the
-   rights conveyed by this License.
-
-1.10. 
"Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. 
You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
deleted file mode 100644
index da17640e6..000000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-default: test
-
-test:
-	go vet ./...
-	go test -race ./...
-
-updatedeps:
-	go get -f -t -u ./...
-	go get -f -u ./...
-
-.PHONY: default test updatedeps
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
deleted file mode 100644
index 0d6f9ed40..000000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-go-retryablehttp
-================
-
-[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
-[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
-
-[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
-[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
-
-The `retryablehttp` package provides a familiar HTTP client interface with
-automatic retries and exponential backoff. It is a thin wrapper over the
-standard `net/http` client library and exposes nearly the same public API. This
-makes `retryablehttp` very easy to drop into existing programs.
-
-`retryablehttp` performs automatic retries under certain conditions. Mainly, if
-an error is returned by the client (connection errors, etc.), or if a 500-range
-response code is received, then a retry is invoked after a wait period.
-Otherwise, the response is returned and left to the caller to interpret.
-
-The main difference from `net/http` is that requests which take a request body
-(POST/PUT et al.) require an `io.ReadSeeker` to be provided. This enables the
-request body to be "rewound" if the initial request fails so that the full
-request can be attempted again.
-
-Example Use
-===========
-
-Using this library should look almost identical to what you would do with
-`net/http`. The simplest example of a GET request is shown below:
-
-```go
-resp, err := retryablehttp.Get("/foo")
-if err != nil {
-    panic(err)
-}
-```
-
-The returned response object is an `*http.Response`, the same thing you would
-usually get from `net/http`. Had the request failed one or more times, the above
-call would block and retry with exponential backoff.
-
-For more usage and examples see the
-[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
deleted file mode 100644
index d0ec6b2ab..000000000
--- a/vendor/github.com/hashicorp/go-retryablehttp/client.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// The retryablehttp package provides a familiar HTTP client interface with
-// automatic retries and exponential backoff. It is a thin wrapper over the
-// standard net/http client library and exposes nearly the same public API.
-// This makes retryablehttp very easy to drop into existing programs.
-//
-// retryablehttp performs automatic retries under certain conditions. 
Mainly, if
-// an error is returned by the client (connection errors, etc.), or if a 500-range
-// response is received, then a retry is invoked. Otherwise, the response is
-// returned and left to the caller to interpret.
-//
-// The main difference from net/http is that requests which take a request body
-// (POST/PUT et al.) require an io.ReadSeeker to be provided. This enables the
-// request body to be "rewound" if the initial request fails so that the full
-// request can be attempted again.
-package retryablehttp
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"math"
-	"net/http"
-	"net/url"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/hashicorp/go-cleanhttp"
-)
-
-var (
-	// Default retry configuration
-	defaultRetryWaitMin = 1 * time.Second
-	defaultRetryWaitMax = 5 * time.Minute
-	defaultRetryMax     = 32
-
-	// defaultClient is used for performing requests without explicitly making
-	// a new client. It is purposely private to avoid modifications.
-	defaultClient = NewClient()
-
-	// We need to consume response bodies to maintain http connections, but
-	// limit the size we consume to respReadLimit.
-	respReadLimit = int64(4096)
-)
-
-// LenReader is an interface implemented by many in-memory io.Readers. Used
-// for automatically sending the right Content-Length header when possible.
-type LenReader interface {
-	Len() int
-}
-
-// Request wraps the metadata needed to create HTTP requests.
-type Request struct {
-	// body is a seekable reader over the request body payload. This is
-	// used to rewind the request data in between retries.
-	body io.ReadSeeker
-
-	// Embed an HTTP request directly. This makes a *Request act exactly
-	// like an *http.Request so that all meta methods are supported.
-	*http.Request
-}
-
-// NewRequest creates a new wrapped request.
-func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
-	// Wrap the body in a noop ReadCloser if non-nil. This prevents the
-	// reader from being closed by the HTTP client.
-	var rcBody io.ReadCloser
-	if body != nil {
-		rcBody = ioutil.NopCloser(body)
-	}
-
-	// Make the request with the noop-closer for the body.
-	httpReq, err := http.NewRequest(method, url, rcBody)
-	if err != nil {
-		return nil, err
-	}
-
-	// Check if we can set the Content-Length automatically.
-	if lr, ok := body.(LenReader); ok {
-		httpReq.ContentLength = int64(lr.Len())
-	}
-
-	return &Request{body, httpReq}, nil
-}
-
-// RequestLogHook allows a function to run before each retry. The HTTP
-// request which will be made, and the retry number (0 for the initial
-// request) are available to users. The internal logger is exposed to
-// consumers.
-type RequestLogHook func(*log.Logger, *http.Request, int)
-
-// ResponseLogHook is like RequestLogHook, but allows running a function
-// on each HTTP response. This function will be invoked at the end of
-// every HTTP request executed, regardless of whether a subsequent retry
-// needs to be performed or not. If the response body is read or closed
-// from this method, this will affect the response returned from Do().
-type ResponseLogHook func(*log.Logger, *http.Response)
-
-// CheckRetry specifies a policy for handling retries. It is called
-// following each request with the response and error values returned by
-// the http.Client. If CheckRetry returns false, the Client stops retrying
-// and returns the response to the caller. If CheckRetry returns an error,
-// that error value is returned in lieu of the error from the request. 
The
-// Client will close any response body when retrying, but if the retry is
-// aborted it is up to the CheckRetry callback to properly close any
-// response body before returning.
-type CheckRetry func(resp *http.Response, err error) (bool, error)
-
-// Client is used to make HTTP requests. It adds additional functionality
-// like automatic retries to tolerate minor outages.
-type Client struct {
-	HTTPClient *http.Client // Internal HTTP client.
-	Logger     *log.Logger  // Custom logger instance.
-
-	RetryWaitMin time.Duration // Minimum time to wait
-	RetryWaitMax time.Duration // Maximum time to wait
-	RetryMax     int           // Maximum number of retries
-
-	// RequestLogHook allows a user-supplied function to be called
-	// before each retry.
-	RequestLogHook RequestLogHook
-
-	// ResponseLogHook allows a user-supplied function to be called
-	// with the response from each HTTP request executed.
-	ResponseLogHook ResponseLogHook
-
-	// CheckRetry specifies the policy for handling retries, and is called
-	// after each request. The default policy is DefaultRetryPolicy.
-	CheckRetry CheckRetry
-}
-
-// NewClient creates a new Client with default settings.
-func NewClient() *Client {
-	return &Client{
-		HTTPClient:   cleanhttp.DefaultClient(),
-		Logger:       log.New(os.Stderr, "", log.LstdFlags),
-		RetryWaitMin: defaultRetryWaitMin,
-		RetryWaitMax: defaultRetryWaitMax,
-		RetryMax:     defaultRetryMax,
-		CheckRetry:   DefaultRetryPolicy,
-	}
-}
-
-// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
-// will retry on connection errors and server errors.
-func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
-	if err != nil {
-		return true, err
-	}
-	// Check the response code. We retry on 500-range responses to allow
-	// the server time to recover, as 500's are typically not permanent
-	// errors and may relate to outages on the server side. This will catch
-	// invalid response codes as well, like 0 and 999.
-	if resp.StatusCode == 0 || resp.StatusCode >= 500 {
-		return true, nil
-	}
-
-	return false, nil
-}
-
-// Do wraps calling an HTTP method with retries.
-func (c *Client) Do(req *Request) (*http.Response, error) {
-	c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
-
-	for i := 0; ; i++ {
-		var code int // HTTP response code
-
-		// Always rewind the request body when non-nil.
-		if req.body != nil {
-			if _, err := req.body.Seek(0, 0); err != nil {
-				return nil, fmt.Errorf("failed to seek body: %v", err)
-			}
-		}
-
-		if c.RequestLogHook != nil {
-			c.RequestLogHook(c.Logger, req.Request, i)
-		}
-
-		// Attempt the request
-		resp, err := c.HTTPClient.Do(req.Request)
-
-		// Check if we should continue with retries.
-		checkOK, checkErr := c.CheckRetry(resp, err)
-
-		if err != nil {
-			c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
-		} else {
-			// Call this here to maintain the behavior of logging all requests,
-			// even if CheckRetry signals to stop.
-			if c.ResponseLogHook != nil {
-				// Call the response logger function if provided.
-				c.ResponseLogHook(c.Logger, resp)
-			}
-		}
-
-		// Now decide if we should continue.
-		if !checkOK {
-			if checkErr != nil {
-				err = checkErr
-			}
-			return resp, err
-		}
-
-		// We're going to retry, consume any response to reuse the connection. 
- if err == nil { - c.drainBody(resp.Body) - } - - remain := c.RetryMax - i - if remain == 0 { - break - } - wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) - time.Sleep(wait) - } - - // Return an error if we fall out of the retry loop - return nil, fmt.Errorf("%s %s giving up after %d attempts", - req.Method, req.URL, c.RetryMax+1) -} - -// Try to read the response body so we can reuse this connection. -func (c *Client) drainBody(body io.ReadCloser) { - defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) - if err != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) - } -} - -// Get is a shortcut for doing a GET request without making a new client. -func Get(url string) (*http.Response, error) { - return defaultClient.Get(url) -} - -// Get is a convenience helper for doing simple GET requests. -func (c *Client) Get(url string) (*http.Response, error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Head is a shortcut for doing a HEAD request without making a new client. -func Head(url string) (*http.Response, error) { - return defaultClient.Head(url) -} - -// Head is a convenience method for doing simple HEAD requests. -func (c *Client) Head(url string) (*http.Response, error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.Do(req) -} - -// Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { - return defaultClient.Post(url, bodyType, body) -} - -// Post is a convenience method for doing simple POST requests. -func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return c.Do(req) -} - -// PostForm is a shortcut to perform a POST with form data without creating -// a new client. -func PostForm(url string, data url.Values) (*http.Response, error) { - return defaultClient.PostForm(url, data) -} - -// PostForm is a convenience method for doing simple POST operations using -// pre-filled url.Values form data. -func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// backoff is used to calculate how long to sleep before retrying -// after observing failures. It takes the minimum/maximum wait time and -// iteration, and returns the duration to wait. -func backoff(min, max time.Duration, iter int) time.Duration { - mult := math.Pow(2, float64(iter)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE deleted file mode 100644 index e87a115e4..000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
"Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile deleted file mode 100644 index c3989e789..000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -TEST?=./... - -test: - go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 - go vet $(TEST) - go test $(TEST) -race - -.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md deleted file mode 100644 index f5abffc29..000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# rootcerts - -Functions for loading root certificates for TLS connections. - ------ - -Go's standard library `crypto/tls` provides a common mechanism for configuring -TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool -of certificates for the client to use as a trust store when verifying server -certificates. - -This library contains utility functions for loading certificates destined for -that field, as well as one other important thing: - -When the `RootCAs` field is `nil`, the standard library attempts to load the -host's root CA set. This behavior is OS-specific, and the Darwin -implementation contains [a bug that prevents trusted certificates from the -System and Login keychains from being loaded][1]. This library contains -Darwin-specific behavior that works around that bug. 
-
-[1]: https://github.com/golang/go/issues/14514
-
-## Example Usage
-
-Here's a snippet demonstrating how this library is meant to be used:
-
-```go
-func httpClient() (*http.Client, error) {
-	tlsConfig := &tls.Config{}
-	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
-		CAFile: os.Getenv("MYAPP_CAFILE"),
-		CAPath: os.Getenv("MYAPP_CAPATH"),
-	})
-	if err != nil {
-		return nil, err
-	}
-	c := cleanhttp.DefaultClient()
-	t := cleanhttp.DefaultTransport()
-	t.TLSClientConfig = tlsConfig
-	c.Transport = t
-	return c, nil
-}
-```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
deleted file mode 100644
index b55cc6284..000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Package rootcerts contains functions to aid in loading CA certificates for
-// TLS connections.
-//
-// In addition, its default behavior on Darwin works around an open issue [1]
-// in Go's crypto/x509 that prevents certificates from being loaded from the
-// System or Login keychains.
-//
-// [1] https://github.com/golang/go/issues/14514
-package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
deleted file mode 100644
index aeb30ece3..000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-// Config determines where LoadCACerts will load certificates from. When both
-// CAFile and CAPath are blank, this library's functions will either load
-// system roots explicitly and return them, or set the CertPool to nil to allow
-// Go's standard library to load system certs.
-type Config struct {
-	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
-	// precedence over CAPath.
-	CAFile string
-
-	// CAPath is a path to a directory populated with PEM-encoded certificates.
-	CAPath string
-}
-
-// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
-// Config specified.
-func ConfigureTLS(t *tls.Config, c *Config) error {
-	if t == nil {
-		return nil
-	}
-	pool, err := LoadCACerts(c)
-	if err != nil {
-		return err
-	}
-	t.RootCAs = pool
-	return nil
-}
-
-// LoadCACerts loads a CertPool based on the Config specified.
-func LoadCACerts(c *Config) (*x509.CertPool, error) {
-	if c == nil {
-		c = &Config{}
-	}
-	if c.CAFile != "" {
-		return LoadCAFile(c.CAFile)
-	}
-	if c.CAPath != "" {
-		return LoadCAPath(c.CAPath)
-	}
-
-	return LoadSystemCAs()
-}
-
-// LoadCAFile loads a single PEM-encoded file from the path specified.
-func LoadCAFile(caFile string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	pem, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, fmt.Errorf("Error loading CA File: %s", err)
-	}
-
-	ok := pool.AppendCertsFromPEM(pem)
-	if !ok {
-		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
-	}
-
-	return pool, nil
-}
-
-// LoadCAPath walks the provided path and loads all certificates encountered into
-// a pool. 
-
-func LoadCAPath(caPath string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-	walkFn := func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if info.IsDir() {
-			return nil
-		}
-
-		pem, err := ioutil.ReadFile(path)
-		if err != nil {
-			return fmt.Errorf("Error loading file from CAPath: %s", err)
-		}
-
-		ok := pool.AppendCertsFromPEM(pem)
-		if !ok {
-			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
-		}
-
-		return nil
-	}
-
-	err := filepath.Walk(caPath, walkFn)
-	if err != nil {
-		return nil, err
-	}
-
-	return pool, nil
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
deleted file mode 100644
index 66b1472c4..000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !darwin
-
-package rootcerts
-
-import "crypto/x509"
-
-// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
-// default behavior of standard TLS config libraries is triggered, which is to
-// load system certs.
-func LoadSystemCAs() (*x509.CertPool, error) {
-	return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
deleted file mode 100644
index a9a040657..000000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/x509"
-	"os/exec"
-	"path"
-
-	"github.com/mitchellh/go-homedir"
-)
-
-// LoadSystemCAs has special behavior on Darwin systems to work around the
-// crypto/x509 issue that prevents keychain certificates from being loaded
-// (see doc.go).
-func LoadSystemCAs() (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	for _, keychain := range certKeychains() {
-		err := addCertsFromKeychain(pool, keychain)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return pool, nil
-}
-
-func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
-	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
-	data, err := cmd.Output()
-	if err != nil {
-		return err
-	}
-
-	pool.AppendCertsFromPEM(data)
-
-	return nil
-}
-
-func certKeychains() []string {
-	keychains := []string{
-		"/System/Library/Keychains/SystemRootCertificates.keychain",
-		"/Library/Keychains/System.keychain",
-	}
-	home, err := homedir.Dir()
-	if err == nil {
-		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
-		keychains = append(keychains, loginKeychain)
-	}
-	return keychains
-}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
deleted file mode 100644
index c33dcc7c9..000000000
--- a/vendor/github.com/hashicorp/hcl/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
-   means each individual or legal entity that creates, contributes to the
-   creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
-   means the combination of the Contributions of others (if any) used by a
-   Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
-   means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
-   means Source Code Form to which the initial Contributor has attached the
-   notice in Exhibit A, the Executable Form of such Source Code Form, and
-   Modifications of such Source Code Form, in each case including portions
-   thereof.
-
-1.5. “Incompatible With Secondary Licenses”
-   means
-
-   a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index ad404a811..000000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index 3d5b8bd92..000000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,104 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. 
-
-Because of this, we decided to create our own configuration language
-that is JSON-compatible. Our configuration language (HCL) is designed
-to be written and modified by humans. The API for HCL allows JSON
-as an input so that it is also machine-friendly (machines can generate
-JSON instead of trying to generate HCL).
-
-Our goal with HCL is not to alienate other configuration languages.
-It is instead to provide HCL as a specialized language for our tools,
-and JSON as the interoperability layer.
-
-## Syntax
-
-For a complete grammar, please see the parser itself. A high-level overview
-of the syntax and grammar is listed here.
-
-  * Single line comments start with `#` or `//`
-
-  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
-    are not allowed. A multi-line comment (also known as a block comment)
-    terminates at the first `*/` found.
-
-  * Values are assigned with the syntax `key = value` (whitespace doesn't
-    matter). The value can be any primitive: a string, number, boolean,
-    object, or list.
-
-  * Strings are double-quoted and can contain any UTF-8 characters.
-    Example: `"Hello, World"`
-
-  * Multi-line strings use `<<EOF`-style heredoc markers, terminated by the
-    marker on a line of its own.

[... the rest of README.md and the head of the appveyor.yml diff were lost in
extraction; only the tail of the deleted appveyor.yml survives below ...]

-    echo %Path%
-
-    go version
-
-    go env
-build_script:
-- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
deleted file mode 100644
index 02888d2ab..000000000
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ /dev/null
@@ -1,654 +0,0 @@
-package hcl
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/hashicorp/hcl/hcl/ast"
-	"github.com/hashicorp/hcl/hcl/parser"
-	"github.com/hashicorp/hcl/hcl/token"
-)
-
-// tagName is the struct tag consulted for HCL decoding settings
-const tagName = "hcl"
-
-var (
-	// nodeType holds a reference to the type of ast.Node
-	nodeType reflect.Type = findNodeType()
-)
-
-// Unmarshal accepts a byte slice as input and writes the
-// data to the value pointed to by v.
-func Unmarshal(bs []byte, v interface{}) error {
-	root, err := parse(bs)
-	if err != nil {
-		return err
-	}
-
-	return DecodeObject(v, root)
-}
-
-// Decode reads the given input and decodes it into the structure
-// given by `out`.
-func Decode(out interface{}, in string) error {
-	obj, err := Parse(in)
-	if err != nil {
-		return err
-	}
-
-	return DecodeObject(out, obj)
-}
-
-// DecodeObject is a lower-level version of Decode. It decodes a
-// raw Object into the given output.
-func DecodeObject(out interface{}, n ast.Node) error {
-	val := reflect.ValueOf(out)
-	if val.Kind() != reflect.Ptr {
-		return errors.New("result must be a pointer")
-	}
-
-	// If we have the file, we really decode the root node
-	if f, ok := n.(*ast.File); ok {
-		n = f.Node
-	}
-
-	var d decoder
-	return d.decode("root", n, val.Elem())
-}
-
-type decoder struct {
-	stack []reflect.Kind
-}
-
-func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
-	k := result
-
-	// If we have an interface with a valid value, we use that
-	// for the check.
-	if result.Kind() == reflect.Interface {
-		elem := result.Elem()
-		if elem.IsValid() {
-			k = elem
-		}
-	}
-
-	// Push current onto stack unless it is an interface.
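-	// (Editor's note: this stack of enclosing kinds is what decodeInterface
-	// consults below to tell a root or slice context apart from a nested one.)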
- if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(int(v))) - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(int(v))) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. 
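-		// (Editor's sketch, hypothetical input: at the root, `b { x = 1 }`
-		// decodes into a map[string]interface{}, while the same block reached
-		// through a slice element decodes into []map[string]interface{} so
-		// that repeated blocks can accumulate.)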
- if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. - if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. 
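-	// (Editor's sketch, hypothetical input: for a map field fed
-	// `tags { env = "prod" }`, each item's first key, here "env", becomes the
-	// map key and its value is decoded into the map element.)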
- done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: 
fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - field.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. 
- filter := list.Filter(fieldName) - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, field); err != nil { - return err - } - } - - decodedFields = append(decodedFields, fieldType.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. -// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index f8bb71a04..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,211 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. 
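-// (Editor's sketch, hypothetical input: filtering items keyed
-// ["service", "web"] and ["service"] by "service" returns both with that
-// prefix stripped; the second is left with zero keys.)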
-// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. 
Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // associated line comment, only when used in a list - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. -func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381df..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. 
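-// (Editor's note: Error below renders as "At <pos>: <err>", so the failing
-// source position leads the message.)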
-type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index cc129b6c7..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,454 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. 
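-		// (Editor's sketch, hypothetical input: source ending in a bare `foo`
-		// parsed a key but no value, so the missing-value error below is more
-		// useful than a plain EOF.)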
- err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. - p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")) - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - // object - return keys, nil - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - fmt.Println("illegal") - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
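-//
-// (Editor's sketch of the dispatch below: `42`, `1.5`, `true`, `"s"`, and
-// heredocs become a literalType; `{` becomes an objectType; `[` becomes a
-// listType.)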
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // If there is no error, we should be at a RBRACE to end the object - if p.tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - if needComma { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA), - } - } - - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. 
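-	// (Editor's note: the buffer holds at most one token, tracked by p.n, so
-	// a single unscan() undoes exactly one scan().)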
- prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index a3f34a7b5..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,629 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. 
- tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
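-//
-// (Editor's sketch: callers typically loop until token.EOF, for example
-//
-//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
-//		// handle tok
-//	}
-//
-// with lexical errors surfaced through s.Error and s.ErrorCount.)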
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. 
- illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. -func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. 
For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 6cfd8c3a5..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,248 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. 
-var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section, except for escaped quotes and escaped backslashes, which we - // handle specifically. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - // We special case escaped double quotes and escaped backslashes in - // interpolations, converting them to their unescaped equivalents. - if r == '\\' { - q, _ := utf8.DecodeRuneInString(s) - switch q { - case '"', '\\': - continue - } - } - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. 
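To make Unquote's rules concrete, a small sketch exercising only behavior implemented above: ordinary escapes are decoded, ${...} sections pass through with just the escaped quotes and backslashes unescaped, and anything but a double-quoted string is rejected:

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	s, err := hclstrconv.Unquote(`"a\tb"`)
	fmt.Printf("%q %v\n", s, err) // "a\tb" <nil>; the escape decoded to a real tab

	s, err = hclstrconv.Unquote(`"${file(\"x.json\")}"`)
	fmt.Printf("%q %v\n", s, err) // the ${...} body is copied as-is, inner \" unescaped

	_, err = hclstrconv.Unquote(`'x'`)
	fmt.Println(err) // invalid syntax: only double-quoted strings are accepted
}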
-func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
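The ordering helpers treat a position as before another when the other has a larger offset or a larger line number. A tiny illustrative check with arbitrary positions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	a := token.Pos{Filename: "fabio.conf", Offset: 10, Line: 2, Column: 1}
	b := token.Pos{Filename: "fabio.conf", Offset: 25, Line: 3, Column: 5}

	fmt.Println(a.IsValid()) // true: Line > 0
	fmt.Println(a.String())  // fabio.conf:2:1
	fmt.Println(a.Before(b)) // true
	fmt.Println(b.After(a))  // true
}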
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index 6e9949804..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,214 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! 
We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 65d56c9b8..000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,297 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. 
If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // Done - return keys, nil - case token.ILLEGAL: - fmt.Println("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? 
Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index 477f71ff3..000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
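As a usage sketch for the JSON parser above (Parse and the flattening pass are from this vendored tree; the input document is illustrative), a JSON document parses into the same ast shape the HCL decoder consumes:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"proxy": {"addr": ":9999", "strategy": "rnd"}}`)
	f, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}
	// f.Node is an *ast.ObjectList; flattenObjects has already rewritten
	// nested object patterns into HCL-style items.
	fmt.Printf("%T\n", f.Node)
}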
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given 
rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa begining from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. -func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. 
For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. 
-type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
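For illustration, this is how JSON literal tokens map into HCL tokens via the HCLToken method defined just below; note the JSON flag set on strings and null mapping to an empty string:

package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	t := jsontoken.Token{Type: jsontoken.STRING, Text: `"abc"`}
	h := t.HCLToken()
	fmt.Println(h.Type, h.JSON) // STRING true

	n := jsontoken.Token{Type: jsontoken.NULL}
	fmt.Printf("%q\n", n.HCLToken().Text) // "": null becomes an empty string
}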
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c292..000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4c..000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/serf/coordinate/README.md b/vendor/github.com/hashicorp/serf/coordinate/README.md deleted file mode 100644 index 0a96fd3eb..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/README.md +++ /dev/null @@ -1 +0,0 @@ -# TODO - I'll beef this up as I implement each of the enhancements. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go deleted file mode 100644 index 613bfff89..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ /dev/null @@ -1,180 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "sort" - "sync" - "time" -) - -// Client manages the estimated network coordinate for a given node, and adjusts -// it as the node observes round trip times and estimated coordinates from other -// nodes. The core algorithm is based on Vivaldi, see the documentation for Config -// for more details. -type Client struct { - // coord is the current estimate of the client's network coordinate. - coord *Coordinate - - // origin is a coordinate sitting at the origin. - origin *Coordinate - - // config contains the tuning parameters that govern the performance of - // the algorithm. - config *Config - - // adjustmentIndex is the current index into the adjustmentSamples slice. - adjustmentIndex uint - - // adjustment is used to store samples for the adjustment calculation. - adjustmentSamples []float64 - - // latencyFilterSamples is used to store the last several RTT samples, - // keyed by node name. We will use the config's LatencyFilterSamples - // value to determine how many samples we keep, per node. - latencyFilterSamples map[string][]float64 - - // mutex enables safe concurrent access to the client. - mutex sync.RWMutex -} - -// NewClient creates a new Client and verifies the configuration is valid. -func NewClient(config *Config) (*Client, error) { - if !(config.Dimensionality > 0) { - return nil, fmt.Errorf("dimensionality must be >0") - } - - return &Client{ - coord: NewCoordinate(config), - origin: NewCoordinate(config), - config: config, - adjustmentIndex: 0, - adjustmentSamples: make([]float64, config.AdjustmentWindowSize), - latencyFilterSamples: make(map[string][]float64), - }, nil -} - -// GetCoordinate returns a copy of the coordinate for this client. 
-func (c *Client) GetCoordinate() *Coordinate { -	c.mutex.RLock() -	defer c.mutex.RUnlock() - -	return c.coord.Clone() -} - -// SetCoordinate forces the client's coordinate to a known state. -func (c *Client) SetCoordinate(coord *Coordinate) { -	c.mutex.Lock() -	defer c.mutex.Unlock() - -	c.coord = coord.Clone() -} - -// ForgetNode removes any client state for the given node. -func (c *Client) ForgetNode(node string) { -	c.mutex.Lock() -	defer c.mutex.Unlock() - -	delete(c.latencyFilterSamples, node) -} - -// latencyFilter applies a simple moving median filter with a new sample for -// a node. This assumes that the mutex has been locked already. -func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { -	samples, ok := c.latencyFilterSamples[node] -	if !ok { -		samples = make([]float64, 0, c.config.LatencyFilterSize) -	} - -	// Add the new sample and trim the list, if needed. -	samples = append(samples, rttSeconds) -	if len(samples) > int(c.config.LatencyFilterSize) { -		samples = samples[1:] -	} -	c.latencyFilterSamples[node] = samples - -	// Sort a copy of the samples and return the median. -	sorted := make([]float64, len(samples)) -	copy(sorted, samples) -	sort.Float64s(sorted) -	return sorted[len(sorted)/2] -} - -// updateVivaldi updates the Vivaldi portion of the client's coordinate. This -// assumes that the mutex has been locked already. -func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { -	const zeroThreshold = 1.0e-6 - -	dist := c.coord.DistanceTo(other).Seconds() -	if rttSeconds < zeroThreshold { -		rttSeconds = zeroThreshold -	} -	wrongness := math.Abs(dist-rttSeconds) / rttSeconds - -	totalError := c.coord.Error + other.Error -	if totalError < zeroThreshold { -		totalError = zeroThreshold -	} -	weight := c.coord.Error / totalError - -	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) -	if c.coord.Error > c.config.VivaldiErrorMax { -		c.coord.Error = c.config.VivaldiErrorMax -	} - -	delta := c.config.VivaldiCC * weight -	force := delta * (rttSeconds - dist) -	c.coord = c.coord.ApplyForce(c.config, force, other) -} - -// updateAdjustment updates the adjustment portion of the client's coordinate, if -// the feature is enabled. This assumes that the mutex has been locked already. -func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { -	if c.config.AdjustmentWindowSize == 0 { -		return -	} - -	// Note that the existing adjustment factors don't figure in to this -	// calculation so we use the raw distance here. -	dist := c.coord.rawDistanceTo(other) -	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist -	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize - -	sum := 0.0 -	for _, sample := range c.adjustmentSamples { -		sum += sample -	} -	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) -} - -// updateGravity applies a small amount of gravity to pull coordinates towards -// the center of the coordinate system to combat drift. This assumes that the -// mutex is locked already. -func (c *Client) updateGravity() { -	dist := c.origin.DistanceTo(c.coord).Seconds() -	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) -	c.coord = c.coord.ApplyForce(c.config, force, c.origin) -} - -// Update takes other, a coordinate for another node, and rtt, a round trip -// time observation for a ping to that node, and updates the estimated position of -// the client's coordinate. Returns the updated coordinate. 
-func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate { - c.mutex.Lock() - defer c.mutex.Unlock() - - rttSeconds := c.latencyFilter(node, rtt.Seconds()) - c.updateVivaldi(other, rttSeconds) - c.updateAdjustment(other, rttSeconds) - c.updateGravity() - return c.coord.Clone() -} - -// DistanceTo returns the estimated RTT from the client's coordinate to other, the -// coordinate for another node. -func (c *Client) DistanceTo(other *Coordinate) time.Duration { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.DistanceTo(other) -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go deleted file mode 100644 index a5b3aadfe..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package coordinate - -// Config is used to set the parameters of the Vivaldi-based coordinate mapping -// algorithm. -// -// The following references are called out at various points in the documentation -// here: -// -// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." -// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. -// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates -// in the Wild." NSDI. Vol. 7. 2007. -// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for -// host-based network coordinate systems." Networking, IEEE/ACM Transactions -// on 18.1 (2010): 27-40. -type Config struct { - // The dimensionality of the coordinate system. As discussed in [2], more - // dimensions improves the accuracy of the estimates up to a point. Per [2] - // we chose 4 dimensions plus a non-Euclidean height. - Dimensionality uint - - // VivaldiErrorMax is the default error value when a node hasn't yet made - // any observations. It also serves as an upper limit on the error value in - // case observations cause the error value to increase without bound. - VivaldiErrorMax float64 - - // VivaldiCE is a tuning factor that controls the maximum impact an - // observation can have on a node's confidence. See [1] for more details. - VivaldiCE float64 - - // VivaldiCC is a tuning factor that controls the maximum impact an - // observation can have on a node's coordinate. See [1] for more details. - VivaldiCC float64 - - // AdjustmentWindowSize is a tuning factor that determines how many samples - // we retain to calculate the adjustment factor as discussed in [3]. Setting - // this to zero disables this feature. - AdjustmentWindowSize uint - - // HeightMin is the minimum value of the height parameter. Since this - // always must be positive, it will introduce a small amount error, so - // the chosen value should be relatively small compared to "normal" - // coordinates. - HeightMin float64 - - // LatencyFilterSamples is the maximum number of samples that are retained - // per node, in order to compute a median. The intent is to ride out blips - // but still keep the delay low, since our time to probe any given node is - // pretty infrequent. See [2] for more details. - LatencyFilterSize uint - - // GravityRho is a tuning factor that sets how much gravity has an effect - // to try to re-center coordinates. See [2] for more details. - GravityRho float64 -} - -// DefaultConfig returns a Config that has some default values suitable for -// basic testing of the algorithm, but not tuned to any particular type of cluster. 
-func DefaultConfig() *Config { - return &Config{ - Dimensionality: 8, - VivaldiErrorMax: 1.5, - VivaldiCE: 0.25, - VivaldiCC: 0.25, - AdjustmentWindowSize: 20, - HeightMin: 10.0e-6, - LatencyFilterSize: 3, - GravityRho: 150.0, - } -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go deleted file mode 100644 index c9194e048..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go +++ /dev/null @@ -1,183 +0,0 @@ -package coordinate - -import ( - "math" - "math/rand" - "time" -) - -// Coordinate is a specialized structure for holding network coordinates for the -// Vivaldi-based coordinate mapping algorithm. All of the fields should be public -// to enable this to be serialized. All values in here are in units of seconds. -type Coordinate struct { - // Vec is the Euclidean portion of the coordinate. This is used along - // with the other fields to provide an overall distance estimate. The - // units here are seconds. - Vec []float64 - - // Err reflects the confidence in the given coordinate and is updated - // dynamically by the Vivaldi Client. This is dimensionless. - Error float64 - - // Adjustment is a distance offset computed based on a calculation over - // observations from all other nodes over a fixed window and is updated - // dynamically by the Vivaldi Client. The units here are seconds. - Adjustment float64 - - // Height is a distance offset that accounts for non-Euclidean effects - // which model the access links from nodes to the core Internet. The access - // links are usually set by bandwidth and congestion, and the core links - // usually follow distance based on geography. - Height float64 -} - -const ( - // secondsToNanoseconds is used to convert float seconds to nanoseconds. - secondsToNanoseconds = 1.0e9 - - // zeroThreshold is used to decide if two coordinates are on top of each - // other. - zeroThreshold = 1.0e-6 -) - -// ErrDimensionalityConflict will be panic-d if you try to perform operations -// with incompatible dimensions. -type DimensionalityConflictError struct{} - -// Adds the error interface. -func (e DimensionalityConflictError) Error() string { - return "coordinate dimensionality does not match" -} - -// NewCoordinate creates a new coordinate at the origin, using the given config -// to supply key initial values. -func NewCoordinate(config *Config) *Coordinate { - return &Coordinate{ - Vec: make([]float64, config.Dimensionality), - Error: config.VivaldiErrorMax, - Adjustment: 0.0, - Height: config.HeightMin, - } -} - -// Clone creates an independent copy of this coordinate. -func (c *Coordinate) Clone() *Coordinate { - vec := make([]float64, len(c.Vec)) - copy(vec, c.Vec) - return &Coordinate{ - Vec: vec, - Error: c.Error, - Adjustment: c.Adjustment, - Height: c.Height, - } -} - -// IsCompatibleWith checks to see if the two coordinates are compatible -// dimensionally. If this returns true then you are guaranteed to not get -// any runtime errors operating on them. -func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { - return len(c.Vec) == len(other.Vec) -} - -// ApplyForce returns the result of applying the force from the direction of the -// other coordinate. 
-func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - ret := c.Clone() - unit, mag := unitVectorAt(c.Vec, other.Vec) - ret.Vec = add(ret.Vec, mul(unit, force)) - if mag > zeroThreshold { - ret.Height = (ret.Height+other.Height)*force/mag + ret.Height - ret.Height = math.Max(ret.Height, config.HeightMin) - } - return ret -} - -// DistanceTo returns the distance between this coordinate and the other -// coordinate, including adjustments. -func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - dist := c.rawDistanceTo(other) - adjustedDist := dist + c.Adjustment + other.Adjustment - if adjustedDist > 0.0 { - dist = adjustedDist - } - return time.Duration(dist * secondsToNanoseconds) -} - -// rawDistanceTo returns the Vivaldi distance between this coordinate and the -// other coordinate in seconds, not including adjustments. This assumes the -// dimensions have already been checked to be compatible. -func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { - return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height -} - -// add returns the sum of vec1 and vec2. This assumes the dimensions have -// already been checked to be compatible. -func add(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] + vec2[i] - } - return ret -} - -// diff returns the difference between the vec1 and vec2. This assumes the -// dimensions have already been checked to be compatible. -func diff(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] - vec2[i] - } - return ret -} - -// mul returns vec multiplied by a scalar factor. -func mul(vec []float64, factor float64) []float64 { - ret := make([]float64, len(vec)) - for i, _ := range vec { - ret[i] = vec[i] * factor - } - return ret -} - -// magnitude computes the magnitude of the vec. -func magnitude(vec []float64) float64 { - sum := 0.0 - for i, _ := range vec { - sum += vec[i] * vec[i] - } - return math.Sqrt(sum) -} - -// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two -// positions are the same then a random unit vector is returned. We also return -// the distance between the points for use in the later height calculation. -func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { - ret := diff(vec1, vec2) - - // If the coordinates aren't on top of each other we can normalize. - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), mag - } - - // Otherwise, just return a random unit vector. - for i, _ := range ret { - ret[i] = rand.Float64() - 0.5 - } - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), 0.0 - } - - // And finally just give up and make a unit vector along the first - // dimension. This should be exceedingly rare. 
-	ret = make([]float64, len(ret)) -	ret[0] = 1.0 -	return ret, 0.0 -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go deleted file mode 100644 index 6fb033c0c..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go +++ /dev/null @@ -1,187 +0,0 @@ -package coordinate - -import ( -	"fmt" -	"math" -	"math/rand" -	"time" -) - -// GenerateClients returns a slice with nodes number of clients, all with the -// given config. -func GenerateClients(nodes int, config *Config) ([]*Client, error) { -	clients := make([]*Client, nodes) -	for i, _ := range clients { -		client, err := NewClient(config) -		if err != nil { -			return nil, err -		} - -		clients[i] = client -	} -	return clients, nil -} - -// GenerateLine returns a truth matrix as if all the nodes are in a straight line -// with the given spacing between them. -func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration { -	truth := make([][]time.Duration, nodes) -	for i := range truth { -		truth[i] = make([]time.Duration, nodes) -	} - -	for i := 0; i < nodes; i++ { -		for j := i + 1; j < nodes; j++ { -			rtt := time.Duration(j-i) * spacing -			truth[i][j], truth[j][i] = rtt, rtt -		} -	} -	return truth -} - -// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional -// grid with the given spacing between them. -func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration { -	truth := make([][]time.Duration, nodes) -	for i := range truth { -		truth[i] = make([]time.Duration, nodes) -	} - -	n := int(math.Sqrt(float64(nodes))) -	for i := 0; i < nodes; i++ { -		for j := i + 1; j < nodes; j++ { -			x1, y1 := float64(i%n), float64(i/n) -			x2, y2 := float64(j%n), float64(j/n) -			dx, dy := x2-x1, y2-y1 -			dist := math.Sqrt(dx*dx + dy*dy) -			rtt := time.Duration(dist * float64(spacing)) -			truth[i][j], truth[j][i] = rtt, rtt -		} -	} -	return truth -} - -// GenerateSplit returns a truth matrix as if half the nodes are close together in -// one location and half the nodes are close together in another. The lan factor -// is used to separate the nodes locally and the wan factor represents the split -// between the two sides. -func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { -	truth := make([][]time.Duration, nodes) -	for i := range truth { -		truth[i] = make([]time.Duration, nodes) -	} - -	split := nodes / 2 -	for i := 0; i < nodes; i++ { -		for j := i + 1; j < nodes; j++ { -			rtt := lan -			if (i <= split && j > split) || (i > split && j <= split) { -				rtt += wan -			} -			truth[i][j], truth[j][i] = rtt, rtt -		} -	} -	return truth -} - -// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed -// around a circle with the given radius. The first node is at the "center" of the -// circle because it's equidistant from all the other nodes, but we place it at -// double the radius, so it should show up above all the other nodes in height. 
-func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - var rtt time.Duration - if i == 0 { - rtt = 2 * radius - } else { - t1 := 2.0 * math.Pi * float64(i) / float64(nodes) - x1, y1 := math.Cos(t1), math.Sin(t1) - t2 := 2.0 * math.Pi * float64(j) / float64(nodes) - x2, y2 := math.Cos(t2), math.Sin(t2) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt = time.Duration(dist * float64(radius)) - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateRandom returns a truth matrix for a set of nodes with normally -// distributed delays, with the given mean and deviation. The RNG is re-seeded -// so you always get the same matrix for a given size. -func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { - rand.Seed(1) - - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() - rtt := time.Duration(rttSeconds * secondsToNanoseconds) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// Simulate runs the given number of cycles using the given list of clients and -// truth matrix. On each cycle, each client will pick a random node and observe -// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for -// each simulation run to get deterministic results (for this algorithm and the -// underlying algorithm which will use random numbers for position vectors when -// starting out with everything at the origin). -func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { - rand.Seed(1) - - nodes := len(clients) - for cycle := 0; cycle < cycles; cycle++ { - for i, _ := range clients { - if j := rand.Intn(nodes); j != i { - c := clients[j].GetCoordinate() - rtt := truth[i][j] - node := fmt.Sprintf("node_%d", j) - clients[i].Update(node, c, rtt) - } - } - } -} - -// Stats is returned from the Evaluate function with a summary of the algorithm -// performance. -type Stats struct { - ErrorMax float64 - ErrorAvg float64 -} - -// Evaluate uses the coordinates of the given clients to calculate estimated -// distances and compares them with the given truth matrix, returning summary -// stats. -func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { - nodes := len(clients) - count := 0 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() - actual := truth[i][j].Seconds() - error := math.Abs(est-actual) / actual - stats.ErrorMax = math.Max(stats.ErrorMax, error) - stats.ErrorAvg += error - count += 1 - } - } - - stats.ErrorAvg /= float64(count) - fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) - return -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/test_util.go b/vendor/github.com/hashicorp/serf/coordinate/test_util.go deleted file mode 100644 index 116e94933..000000000 --- a/vendor/github.com/hashicorp/serf/coordinate/test_util.go +++ /dev/null @@ -1,27 +0,0 @@ -package coordinate - -import ( - "math" - "testing" -) - -// verifyEqualFloats will compare f1 and f2 and fail if they are not -// "equal" within a threshold. 
-func verifyEqualFloats(t *testing.T, f1 float64, f2 float64) { - const zeroThreshold = 1.0e-6 - if math.Abs(f1-f2) > zeroThreshold { - t.Fatalf("equal assertion fail, %9.6f != %9.6f", f1, f2) - } -} - -// verifyEqualVectors will compare vec1 and vec2 and fail if they are not -// "equal" within a threshold. -func verifyEqualVectors(t *testing.T, vec1 []float64, vec2 []float64) { - if len(vec1) != len(vec2) { - t.Fatalf("vector length mismatch, %d != %d", len(vec1), len(vec2)) - } - - for i, _ := range vec1 { - verifyEqualFloats(t, vec1[i], vec2[i]) - } -} diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE deleted file mode 100644 index e87a115e4..000000000 --- a/vendor/github.com/hashicorp/vault/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. 
such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/vault/api/SPEC.md b/vendor/github.com/hashicorp/vault/api/SPEC.md deleted file mode 100644 index 15345f390..000000000 --- a/vendor/github.com/hashicorp/vault/api/SPEC.md +++ /dev/null @@ -1,611 +0,0 @@ -FORMAT: 1A - -# vault - -The Vault API gives you full access to the Vault project. - -If you're browsing this API specification in GitHub or in raw -format, please excuse some of the odd formatting. This document -is in api-blueprint format that is read by viewers such as -Apiary. - -## Sealed vs. Unsealed - -Whenever an individual Vault server is started, it is started -in the _sealed_ state. In this state, it knows where its data -is located, but the data is encrypted and Vault doesn't have the -encryption keys to access it. Before Vault can operate, it must -be _unsealed_. - -**Note:** Sealing/unsealing has no relationship to _authentication_ -which is separate and still required once the Vault is unsealed. 
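(Editor's note: to make the sealed/unsealed distinction concrete, here is a minimal Go sketch that checks the seal state through the `/sys/seal-status` endpoint documented later in this spec. The server address is an illustrative assumption, and raw HTTP stands in for whatever client library you actually use.)

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// sealStatus mirrors the /sys/seal-status response fields described
// later in this spec: sealed, t, n, and progress.
type sealStatus struct {
	Sealed   bool `json:"sealed"`
	T        int  `json:"t"`
	N        int  `json:"n"`
	Progress int  `json:"progress"`
}

func main() {
	// 127.0.0.1:8200 is an assumed address for a local test server.
	resp, err := http.Get("http://127.0.0.1:8200/v1/sys/seal-status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var st sealStatus
	if err := json.NewDecoder(resp.Body).Decode(&st); err != nil {
		panic(err)
	}
	fmt.Printf("sealed=%v, unseal progress %d of %d (shares: %d)\n",
		st.Sealed, st.Progress, st.T, st.N)
}
```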
- -Instead of being sealed with a single key, we utilize -[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing) -to shard a key into _n_ parts such that _t_ parts are required -to reconstruct the original key, where `t <= n`. This means that -Vault itself doesn't know the original key, and no single person -has the original key (unless `n = 1`, or `t` parts are given to -a single person). - -Unsealing is done via an unauthenticated -[unseal API](#reference/seal/unseal/unseal). This API takes a single -master shard and progresses the unsealing process. Once all shards -are given, the Vault is either unsealed or resets the unsealing -process if the key was invalid. - -The entire seal/unseal state is server-wide. This allows multiple -distinct operators to use the unseal API (or more likely the -`vault unseal` command) from separate computers/networks and never -have to transmit their key in order to unseal the vault in a -distributed fashion. - -## Transport - -The API is expected to be accessed over a TLS connection at -all times, with a valid certificate that is verified by a well -behaved client. - -## Authentication - -Once the Vault is unsealed, every other operation requires -authentication. There are multiple methods for authentication -that can be enabled (see -[authentication](#reference/authentication)). - -Authentication is done with the login endpoint. The login endpoint -returns an access token that is set as the `X-Vault-Token` header. - -## Help - -To retrieve the help for any API within Vault, including mounted -backends, credential providers, etc. then append `?help=1` to any -URL. If you have valid permission to access the path, then the help text -will be returned with the following structure: - - { - "help": "help text" - } - -## Error Response - -A common JSON structure is always returned to return errors: - - { - "errors": [ - "message", - "another message" - ] - } - -This structure will be sent down for any non-20x HTTP status. - -## HTTP Status Codes - -The following HTTP status codes are used throughout the API. - -- `200` - Success with data. -- `204` - Success, no data returned. -- `400` - Invalid request, missing or invalid data. -- `403` - Forbidden, your authentication details are either - incorrect or you don't have access to this feature. -- `404` - Invalid path. This can both mean that the path truly - doesn't exist or that you don't have permission to view a - specific path. We use 404 in some cases to avoid state leakage. -- `429` - Rate limit exceeded. Try again after waiting some period - of time. -- `500` - Internal server error. An internal error has occurred, - try again later. If the error persists, report a bug. -- `503` - Vault is down for maintenance or is currently sealed. - Try again later. - -# Group Initialization - -## Initialization [/sys/init] -### Initialization Status [GET] -Returns the status of whether the vault is initialized or not. The -vault doesn't have to be unsealed for this operation. - -+ Response 200 (application/json) - - { - "initialized": true - } - -### Initialize [POST] -Initialize the vault. This is an unauthenticated request to initially -setup a new vault. Although this is unauthenticated, it is still safe: -data cannot be in vault prior to initialization, and any future -authentication will fail if you didn't initialize it yourself. -Additionally, once initialized, a vault cannot be reinitialized. 
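(Editor's note: a minimal sketch of driving this initialization endpoint over plain HTTP, assuming a freshly started, uninitialized server on 127.0.0.1:8200; the share counts and field names follow the request and response examples shown just below.)

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Shares and threshold follow the spec's own example values.
	body, err := json.Marshal(map[string]int{
		"secret_shares":    5,
		"secret_threshold": 3,
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://127.0.0.1:8200/v1/sys/init",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// keys and root_token are returned exactly this once, so treat
	// the output with care and never log it verbatim.
	var out struct {
		Keys      []string `json:"keys"`
		RootToken string   `json:"root_token"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("received %d unseal key shares and a root token\n", len(out.Keys))
}
```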
- -This API is the only time Vault will ever be aware of your keys, and -the only time the keys will ever be returned in one unit. Care should -be taken to ensure that the output of this request is never logged, -and that the keys are properly distributed. - -The response also contains the initial root token that can be used -as authentication in order to initially configure Vault once it is -unsealed. Just as with the unseal keys, this is the only time Vault is -ever aware of this token. - -+ Request (application/json) - - { - "secret_shares": 5, - "secret_threshold": 3 - } - -+ Response 200 (application/json) - - { - "keys": ["one", "two", "three"], - "root_token": "foo" - } - -# Group Seal/Unseal - -## Seal Status [/sys/seal-status] -### Seal Status [GET] -Returns the status of whether the vault is currently -sealed or not, as well as the progress of unsealing. - -The response has the following attributes: - -- sealed (boolean) - If true, the vault is sealed. Otherwise, - it is unsealed. -- t (int) - The "t" value for the master key, or the number - of shards needed total to unseal the vault. -- n (int) - The "n" value for the master key, or the total - number of shards of the key distributed. -- progress (int) - The number of master key shards that have - been entered so far towards unsealing the vault. - -+ Response 200 (application/json) - - { - "sealed": true, - "t": 3, - "n": 5, - "progress": 1 - } - -## Seal [/sys/seal] -### Seal [PUT] -Seal the vault. - -Sealing the vault locks Vault from any future operations on any -secrets or system configuration until the vault is once again -unsealed. Internally, sealing throws away the keys to access the -encrypted vault data, so Vault is unable to access the data without -unsealing to get the encryption keys. - -+ Response 204 - -## Unseal [/sys/unseal] -### Unseal [PUT] -Unseal the vault. - -Unseal the vault by entering a portion of the master key. The -response object will tell you if the unseal is complete or -only partial. - -If the vault is already unsealed, this does nothing. It is -not an error, the return value just says the vault is unsealed. -Due to the architecture of Vault, we cannot validate whether -any portion of the unseal key given is valid until all keys -are inputted, therefore unsealing an already unsealed vault -is still a success even if the input key is invalid. - -+ Request (application/json) - - { - "key": "value" - } - -+ Response 200 (application/json) - - { - "sealed": true, - "t": 3, - "n": 5, - "progress": 1 - } - -# Group Authentication - -## List Auth Methods [/sys/auth] -### List all auth methods [GET] -Lists all available authentication methods. - -This returns the name of the authentication method as well as -a human-friendly long-form help text for the method that can be -shown to the user as documentation. - -+ Response 200 (application/json) - - { - "token": { - "type": "token", - "description": "Token authentication" - }, - "oauth": { - "type": "oauth", - "description": "OAuth authentication" - } - } - -## Single Auth Method [/sys/auth/{id}] - -+ Parameters - + id (required, string) ... The ID of the auth method. - -### Enable an auth method [PUT] -Enables an authentication method. - -The body of the request depends on the authentication method -being used. Please reference the documentation for the specific -authentication method you're enabling in order to determine what -parameters you must give it. 
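(Editor's note: as a minimal sketch, enabling a hypothetical "userpass"-style method could look like the following; the method type, token value, and address are illustrative assumptions, and the request shape matches the example shown just below.)

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The method type is an assumption for illustration; the real
	// parameters are defined by the specific authentication method.
	body, err := json.Marshal(map[string]string{"type": "userpass"})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("PUT",
		"http://127.0.0.1:8200/v1/sys/auth/userpass", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Per the Authentication section above, the access token travels
	// in the X-Vault-Token header; this value is a placeholder.
	req.Header.Set("X-Vault-Token", "REPLACE_WITH_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The status code table above maps 204 to "success, no data".
	fmt.Println("status:", resp.StatusCode)
}
```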
- -If an authentication method is already enabled, then this can be -used to change the configuration, including even the type of -the configuration. - -+ Request (application/json) - - { - "type": "type", - "key": "value", - "key2": "value2" - } - -+ Response 204 - -### Disable an auth method [DELETE] -Disables an authentication method. Previously authenticated sessions -are immediately invalidated. - -+ Response 204 - -# Group Policies - -Policies are named permission sets that identities returned by -credential stores are bound to. This separates _authentication_ -from _authorization_. - -## Policies [/sys/policy] -### List all Policies [GET] - -List all the policies. - -+ Response 200 (application/json) - - { - "policies": ["root"] - } - -## Single Policy [/sys/policy/{id}] - -+ Parameters - + id (required, string) ... The name of the policy - -### Upsert [PUT] - -Create or update a policy with the given ID. - -+ Request (application/json) - - { - "rules": "HCL" - } - -+ Response 204 - -### Delete [DELETE] - -Delete a policy with the given ID. Any identities bound to this -policy will immediately become "deny all" despite already being -authenticated. - -+ Response 204 - -# Group Mounts - -Logical backends are mounted at _mount points_, similar to -filesystems. This allows you to mount the "aws" logical backend -at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo` -for example. This enables multiple logical backends to be enabled. - -## Mounts [/sys/mounts] -### List all mounts [GET] - -Lists all the active mount points. - -+ Response 200 (application/json) - - { - "aws": { - "type": "aws", - "description": "AWS" - }, - "pg": { - "type": "postgresql", - "description": "PostgreSQL dynamic users" - } - } - -## Single Mount [/sys/mounts/{path}] -### New Mount [POST] - -Mount a logical backend to a new path. - -Configuration for this new backend is done via the normal -read/write mechanism once it is mounted. - -+ Request (application/json) - - { - "type": "aws", - "description": "EU AWS tokens" - } - -+ Response 204 - -### Unmount [DELETE] - -Unmount a mount point. - -+ Response 204 - -## Remount [/sys/remount] -### Remount [POST] - -Move an already-mounted backend to a new path. - -+ Request (application/json) - - { - "from": "aws", - "to": "aws-east" - } - -+ Response 204 - -# Group Audit Backends - -Audit backends are responsible for shuttling the audit logs that -Vault generates to a durable system for future querying. By default, -audit logs are not stored anywhere. - -## Audit Backends [/sys/audit] -### List Enabled Audit Backends [GET] - -List all the enabled audit backends - -+ Response 200 (application/json) - - { - "file": { - "type": "file", - "description": "Send audit logs to a file", - "options": {} - } - } - -## Single Audit Backend [/sys/audit/{path}] - -+ Parameters - + path (required, string) ... The path where the audit backend is mounted - -### Enable [PUT] - -Enable an audit backend. - -+ Request (application/json) - - { - "type": "file", - "description": "send to a file", - "options": { - "path": "/var/log/vault.audit.log" - } - } - -+ Response 204 - -### Disable [DELETE] - -Disable an audit backend. - -+ Request (application/json) - -+ Response 204 - -# Group Secrets - -## Generic [/{mount}/{path}] - -This group documents the general format of reading and writing -to Vault. 
The exact structure of the keyspace is defined by the -logical backends in use, so documentation related to -a specific backend should be referenced for details on what keys -and routes are expected. - -The path for examples are `/prefix/path`, but in practice -these will be defined by the backends that are mounted. For -example, reading an AWS key might be at the `/aws/root` path. -These paths are defined by the logical backends. - -+ Parameters - + mount (required, string) ... The mount point for the - logical backend. Example: `aws`. - + path (optional, string) ... The path within the backend - to read or write data. - -### Read [GET] - -Read data from vault. - -The data read from the vault can either be a secret or -arbitrary configuration data. The type of data returned -depends on the path, and is defined by the logical backend. - -If the return value is a secret, then the return structure -is a mixture of arbitrary key/value along with the following -fields which are guaranteed to exist: - -- `lease_id` (string) - A unique ID used for renewal and - revocation. - -- `renewable` (bool) - If true, then this key can be renewed. - If a key can't be renewed, then a new key must be requested - after the lease duration period. - -- `lease_duration` (int) - The time in seconds that a secret is - valid for before it must be renewed. - -- `lease_duration_max` (int) - The maximum amount of time in - seconds that a secret is valid for. This will always be - greater than or equal to `lease_duration`. The difference - between this and `lease_duration` is an overlap window - where multiple keys may be valid. - -If the return value is not a secret, then the return structure -is an arbitrary JSON object. - -+ Response 200 (application/json) - - { - "lease_id": "UUID", - "lease_duration": 3600, - "key": "value" - } - -### Write [PUT] - -Write data to vault. - -The behavior and arguments to the write are defined by -the logical backend. - -+ Request (application/json) - - { - "key": "value" - } - -+ Response 204 - -# Group Lease Management - -## Renew Key [/sys/renew/{id}] - -+ Parameters - + id (required, string) ... The `lease_id` of the secret - to renew. - -### Renew [PUT] - -+ Response 200 (application/json) - - { - "lease_id": "...", - "lease_duration": 3600, - "access_key": "foo", - "secret_key": "bar" - } - -## Revoke Key [/sys/revoke/{id}] - -+ Parameters - + id (required, string) ... The `lease_id` of the secret - to revoke. - -### Revoke [PUT] - -+ Response 204 - -# Group Backend: AWS - -## Root Key [/aws/root] -### Set the Key [PUT] - -Set the root key that the logical backend will use to create -new secrets, IAM policies, etc. - -+ Request (application/json) - - { - "access_key": "key", - "secret_key": "key", - "region": "us-east-1" - } - -+ Response 204 - -## Policies [/aws/policies] -### List Policies [GET] - -List all the policies that can be used to create keys. - -+ Response 200 (application/json) - - [{ - "name": "root", - "description": "Root access" - }, { - "name": "web-deploy", - "description": "Enough permissions to deploy the web app." - }] - -## Single Policy [/aws/policies/{name}] - -+ Parameters - + name (required, string) ... Name of the policy. - -### Read [GET] - -Read a policy. - -+ Response 200 (application/json) - - { - "policy": "base64-encoded policy" - } - -### Upsert [PUT] - -Create or update a policy. - -+ Request (application/json) - - { - "policy": "base64-encoded policy" - } - -+ Response 204 - -### Delete [DELETE] - -Delete the policy with the given name. 
- -+ Response 204 - -## Generate Access Keys [/aws/keys/{policy}] -### Create [GET] - -This generates a new keypair for the given policy. - -+ Parameters - + policy (required, string) ... The policy under which to create - the key pair. - -+ Response 200 (application/json) - - { - "lease_id": "...", - "lease_duration": 3600, - "access_key": "foo", - "secret_key": "bar" - } diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go deleted file mode 100644 index da870c111..000000000 --- a/vendor/github.com/hashicorp/vault/api/auth.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Auth is used to perform credential backend related operations. -type Auth struct { - c *Client -} - -// Auth is used to return the client for credential-backend API calls. -func (c *Client) Auth() *Auth { - return &Auth{c: c} -} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go deleted file mode 100644 index 2dae4df62..000000000 --- a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ /dev/null @@ -1,178 +0,0 @@ -package api - -// TokenAuth is used to perform token backend operations on Vault -type TokenAuth struct { - c *Client -} - -// Token is used to return the client for token-backend API calls -func (a *Auth) Token() *TokenAuth { - return &TokenAuth{c: a.c} -} - -func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/create") - if err := r.SetJSONBody(opts); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName) - if err := r.SetJSONBody(opts); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) Lookup(token string) (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/auth/token/lookup/"+token) - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { - r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor/"+accessor) - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) LookupSelf() (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self") - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/auth/token/renew/"+token) - - body := map[string]interface{}{"increment": increment} - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") - - body := map[string]interface{}{"increment": increment} - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - 
resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -// RevokeAccessor revokes a token associated with the given accessor -// along with all the child tokens. -func (c *TokenAuth) RevokeAccessor(accessor string) error { - r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor/"+accessor) - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -// RevokeOrphan revokes a token without revoking the tree underneath it (so -// child tokens are orphaned rather than revoked) -func (c *TokenAuth) RevokeOrphan(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan/"+token) - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -// RevokeSelf revokes the token making the call -func (c *TokenAuth) RevokeSelf(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -// RevokeTree is the "normal" revoke operation that revokes the given token and -// the entire tree underneath -- all of its child tokens, their child tokens, -// etc. -func (c *TokenAuth) RevokeTree(token string) error { - r := c.c.NewRequest("PUT", "/v1/auth/token/revoke/"+token) - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -// TokenCreateRequest is the options structure for creating a token. -type TokenCreateRequest struct { - ID string `json:"id,omitempty"` - Policies []string `json:"policies,omitempty"` - Metadata map[string]string `json:"meta,omitempty"` - Lease string `json:"lease,omitempty"` - TTL string `json:"ttl,omitempty"` - ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` - NoParent bool `json:"no_parent,omitempty"` - NoDefaultPolicy bool `json:"no_default_policy,omitempty"` - DisplayName string `json:"display_name"` - NumUses int `json:"num_uses"` - Renewable *bool `json:"renewable,omitempty"` -} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go deleted file mode 100644 index 70bc5e00e..000000000 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ /dev/null @@ -1,333 +0,0 @@ -package api - -import ( - "crypto/tls" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -const EnvVaultAddress = "VAULT_ADDR" -const EnvVaultCACert = "VAULT_CACERT" -const EnvVaultCAPath = "VAULT_CAPATH" -const EnvVaultClientCert = "VAULT_CLIENT_CERT" -const EnvVaultClientKey = "VAULT_CLIENT_KEY" -const EnvVaultInsecure = "VAULT_SKIP_VERIFY" -const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" -const EnvVaultWrapTTL = "VAULT_WRAP_TTL" - -var ( - errRedirect = errors.New("redirect") -) - -// WrappingLookupFunc is a function that, given an HTTP verb and a path, -// returns an optional string duration to be used for response wrapping (e.g. -// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", -// however, end-of-path forward slashes are not trimmed, so must match your -// called path precisely. -type WrappingLookupFunc func(operation, path string) string - -// Config is used to configure the creation of the client. -type Config struct { - // Address is the address of the Vault server. 
This should be a complete - // URL such as "http://vault.example.com". If you need a custom SSL - // cert or want to enable insecure mode, you need to specify a custom - // HttpClient. - Address string - - // HttpClient is the HTTP client to use, which will currently always have the - // same values as http.DefaultClient. This is used to control redirect behavior. - HttpClient *http.Client - - redirectSetup sync.Once -} - -// DefaultConfig returns a default configuration for the client. It is -// safe to modify the return value of this function. -// -// The default Address is https://127.0.0.1:8200, but this can be overridden by -// setting the `VAULT_ADDR` environment variable. -func DefaultConfig() *Config { - config := &Config{ - Address: "https://127.0.0.1:8200", - - HttpClient: cleanhttp.DefaultClient(), - } - config.HttpClient.Timeout = time.Second * 60 - transport := config.HttpClient.Transport.(*http.Transport) - transport.TLSHandshakeTimeout = 10 * time.Second - transport.TLSClientConfig = &tls.Config{ - MinVersion: tls.VersionTLS12, - } - - if v := os.Getenv(EnvVaultAddress); v != "" { - config.Address = v - } - - return config -} - -// ReadEnvironment reads configuration information from the -// environment. If there is an error, no configuration value -// is updated. -func (c *Config) ReadEnvironment() error { - var envAddress string - var envCACert string - var envCAPath string - var envClientCert string - var envClientKey string - var envInsecure bool - var foundInsecure bool - var envTLSServerName string - - var clientCert tls.Certificate - var foundClientCert bool - var err error - - if v := os.Getenv(EnvVaultAddress); v != "" { - envAddress = v - } - if v := os.Getenv(EnvVaultCACert); v != "" { - envCACert = v - } - if v := os.Getenv(EnvVaultCAPath); v != "" { - envCAPath = v - } - if v := os.Getenv(EnvVaultClientCert); v != "" { - envClientCert = v - } - if v := os.Getenv(EnvVaultClientKey); v != "" { - envClientKey = v - } - if v := os.Getenv(EnvVaultInsecure); v != "" { - var err error - envInsecure, err = strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY") - } - foundInsecure = true - } - if v := os.Getenv(EnvVaultTLSServerName); v != "" { - envTLSServerName = v - } - // If we need custom TLS configuration, then set it - if envCACert != "" || envCAPath != "" || envClientCert != "" || envClientKey != "" || envInsecure { - if envClientCert != "" && envClientKey != "" { - clientCert, err = tls.LoadX509KeyPair(envClientCert, envClientKey) - if err != nil { - return err - } - foundClientCert = true - } else if envClientCert != "" || envClientKey != "" { - return fmt.Errorf("Both client cert and client key must be provided") - } - } - - if envAddress != "" { - c.Address = envAddress - } - - clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig - if foundInsecure { - clientTLSConfig.InsecureSkipVerify = envInsecure - } - - if foundClientCert { - clientTLSConfig.Certificates = []tls.Certificate{clientCert} - } - if envTLSServerName != "" { - clientTLSConfig.ServerName = envTLSServerName - } - - rootConfig := &rootcerts.Config{ - CAFile: envCACert, - CAPath: envCAPath, - } - err = rootcerts.ConfigureTLS(clientTLSConfig, rootConfig) - if err != nil { - return err - } - - return nil -} - -// Client is the client to the Vault API. Create a client with -// NewClient. 
-type Client struct { - addr *url.URL - config *Config - token string - wrappingLookupFunc WrappingLookupFunc -} - -// NewClient returns a new client for the given configuration. -// -// If the environment variable `VAULT_TOKEN` is present, the token will be -// automatically added to the client. Otherwise, you must manually call -// `SetToken()`. -func NewClient(c *Config) (*Client, error) { - u, err := url.Parse(c.Address) - if err != nil { - return nil, err - } - - if c.HttpClient == nil { - c.HttpClient = DefaultConfig().HttpClient - } - - redirFunc := func() { - // Ensure redirects are not automatically followed - // Note that this is sane for the API client as it has its own - // redirect handling logic (and thus also for command/meta), - // but in e.g. http_test actual redirect handling is necessary - c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return errRedirect - } - } - - c.redirectSetup.Do(redirFunc) - - client := &Client{ - addr: u, - config: c, - } - - if token := os.Getenv("VAULT_TOKEN"); token != "" { - client.SetToken(token) - } - - return client, nil -} - -// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs -// for a given operation and path -func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { - c.wrappingLookupFunc = lookupFunc -} - -// Token returns the access token being used by this client. It will -// return the empty string if there is no token set. -func (c *Client) Token() string { - return c.token -} - -// SetToken sets the token directly. This won't perform any auth -// verification, it simply sets the token properly for future requests. -func (c *Client) SetToken(v string) { - c.token = v -} - -// ClearToken deletes the token if it is set or does nothing otherwise. -func (c *Client) ClearToken() { - c.token = "" -} - -// NewRequest creates a new raw request object to query the Vault server -// configured for this client. This is an advanced method and generally -// doesn't need to be called externally. -func (c *Client) NewRequest(method, path string) *Request { - req := &Request{ - Method: method, - URL: &url.URL{ - Scheme: c.addr.Scheme, - Host: c.addr.Host, - Path: path, - }, - ClientToken: c.token, - Params: make(map[string][]string), - } - - if c.wrappingLookupFunc != nil { - var lookupPath string - switch { - case strings.HasPrefix(path, "/v1/"): - lookupPath = strings.TrimPrefix(path, "/v1/") - case strings.HasPrefix(path, "v1/"): - lookupPath = strings.TrimPrefix(path, "v1/") - default: - lookupPath = path - } - req.WrapTTL = c.wrappingLookupFunc(method, lookupPath) - } - - return req -} - -// RawRequest performs the raw request given. This request may be against -// a Vault server not configured with this client. This is an advanced operation -// that generally won't need to be called externally. -func (c *Client) RawRequest(r *Request) (*Response, error) { - redirectCount := 0 -START: - req, err := r.ToHTTP() - if err != nil { - return nil, err - } - - var result *Response - resp, err := c.config.HttpClient.Do(req) - if resp != nil { - result = &Response{Response: resp} - } - if err != nil { - if urlErr, ok := err.(*url.Error); ok && urlErr.Err == errRedirect { - err = nil - } else if strings.Contains(err.Error(), "tls: oversized") { - err = fmt.Errorf( - "%s\n\n"+ - "This error usually means that the server is running with TLS disabled\n"+ - "but the client is configured to use TLS. 
Please either enable TLS\n"+
- "on the server or run the client with -address set to an address\n"+
- "that uses the http protocol:\n\n"+
- " vault <command> -address http://<address>\n\n"+
- "You can also set the VAULT_ADDR environment variable:\n\n\n"+
- " VAULT_ADDR=http://<address> vault <command>\n\n"+
- "where <address>
is replaced by the actual address to the server.", - err) - } - } - if err != nil { - return result, err - } - - // Check for a redirect, only allowing for a single redirect - if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 { - // Parse the updated location - respLoc, err := resp.Location() - if err != nil { - return result, err - } - - // Ensure a protocol downgrade doesn't happen - if req.URL.Scheme == "https" && respLoc.Scheme != "https" { - return result, fmt.Errorf("redirect would cause protocol downgrade") - } - - // Update the request - r.URL = respLoc - - // Reset the request body if any - if err := r.ResetJSONBody(); err != nil { - return result, err - } - - // Retry the request - redirectCount++ - goto START - } - - if err := result.Error(); err != nil { - return result, err - } - - return result, nil -} diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go deleted file mode 100644 index b9ae100bc..000000000 --- a/vendor/github.com/hashicorp/vault/api/help.go +++ /dev/null @@ -1,25 +0,0 @@ -package api - -import ( - "fmt" -) - -// Help reads the help information for the given path. -func (c *Client) Help(path string) (*Help, error) { - r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) - r.Params.Add("help", "1") - resp, err := c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result Help - err = resp.DecodeJSON(&result) - return &result, err -} - -type Help struct { - Help string `json:"help"` - SeeAlso []string `json:"see_also"` -} diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go deleted file mode 100644 index 2e967f445..000000000 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ /dev/null @@ -1,123 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "fmt" -) - -const ( - wrappedResponseLocation = "cubbyhole/response" -) - -// Logical is used to perform logical backend operations on Vault. -type Logical struct { - c *Client -} - -// Logical is used to return the client for logical-backend API calls. 
-func (c *Client) Logical() *Logical { - return &Logical{c: c} -} - -func (c *Logical) Read(path string) (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/"+path) - resp, err := c.c.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if resp != nil && resp.StatusCode == 404 { - return nil, nil - } - if err != nil { - return nil, err - } - - return ParseSecret(resp.Body) -} - -func (c *Logical) List(path string) (*Secret, error) { - r := c.c.NewRequest("GET", "/v1/"+path) - r.Params.Set("list", "true") - resp, err := c.c.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if resp != nil && resp.StatusCode == 404 { - return nil, nil - } - if err != nil { - return nil, err - } - - return ParseSecret(resp.Body) -} - -func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/"+path) - if err := r.SetJSONBody(data); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if err != nil { - return nil, err - } - - if resp.StatusCode == 200 { - return ParseSecret(resp.Body) - } - - return nil, nil -} - -func (c *Logical) Delete(path string) (*Secret, error) { - r := c.c.NewRequest("DELETE", "/v1/"+path) - resp, err := c.c.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if err != nil { - return nil, err - } - - if resp.StatusCode == 200 { - return ParseSecret(resp.Body) - } - - return nil, nil -} - -func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { - origToken := c.c.Token() - defer c.c.SetToken(origToken) - - c.c.SetToken(wrappingToken) - - secret, err := c.Read(wrappedResponseLocation) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err) - } - if secret == nil { - return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation) - } - if secret.Data == nil { - return nil, fmt.Errorf("\"data\" not found in wrapping response") - } - if _, ok := secret.Data["response"]; !ok { - return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") - } - - wrappedSecret := new(Secret) - buf := bytes.NewBufferString(secret.Data["response"].(string)) - dec := json.NewDecoder(buf) - dec.UseNumber() - if err := dec.Decode(wrappedSecret); err != nil { - return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err) - } - - return wrappedSecret, nil -} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go deleted file mode 100644 index 8f22dd572..000000000 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ /dev/null @@ -1,71 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "net/url" -) - -// Request is a raw request configuration structure used to initiate -// API requests to the Vault server. -type Request struct { - Method string - URL *url.URL - Params url.Values - ClientToken string - WrapTTL string - Obj interface{} - Body io.Reader - BodySize int64 -} - -// SetJSONBody is used to set a request body that is a JSON-encoded value. 
-func (r *Request) SetJSONBody(val interface{}) error { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(val); err != nil { - return err - } - - r.Obj = val - r.Body = buf - r.BodySize = int64(buf.Len()) - return nil -} - -// ResetJSONBody is used to reset the body for a redirect -func (r *Request) ResetJSONBody() error { - if r.Body == nil { - return nil - } - return r.SetJSONBody(r.Obj) -} - -// ToHTTP turns this request into a valid *http.Request for use with the -// net/http package. -func (r *Request) ToHTTP() (*http.Request, error) { - // Encode the query parameters - r.URL.RawQuery = r.Params.Encode() - - // Create the HTTP request - req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body) - if err != nil { - return nil, err - } - - req.URL.Scheme = r.URL.Scheme - req.URL.Host = r.URL.Host - req.Host = r.URL.Host - - if len(r.ClientToken) != 0 { - req.Header.Set("X-Vault-Token", r.ClientToken) - } - - if len(r.WrapTTL) != 0 { - req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) - } - - return req, nil -} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go deleted file mode 100644 index a646a0da2..000000000 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ /dev/null @@ -1,75 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" -) - -// Response is a raw response that wraps an HTTP response. -type Response struct { - *http.Response -} - -// DecodeJSON will decode the response body to a JSON structure. This -// will consume the response body, but will not close it. Close must -// still be called. -func (r *Response) DecodeJSON(out interface{}) error { - dec := json.NewDecoder(r.Body) - dec.UseNumber() - return dec.Decode(out) -} - -// Error returns an error response if there is one. If there is an error, -// this will fully consume the response body, but will not close it. The -// body must still be closed manually. -func (r *Response) Error() error { - // 200 to 399 are okay status codes - if r.StatusCode >= 200 && r.StatusCode < 400 { - return nil - } - - // We have an error. Let's copy the body into our own buffer first, - // so that if we can't decode JSON, we can at least copy it raw. - var bodyBuf bytes.Buffer - if _, err := io.Copy(&bodyBuf, r.Body); err != nil { - return err - } - - // Decode the error response if we can. Note that we wrap the bodyBuf - // in a bytes.Reader here so that the JSON decoder doesn't move the - // read pointer for the original buffer. - var resp ErrorResponse - dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) - dec.UseNumber() - if err := dec.Decode(&resp); err != nil { - // Ignore the decoding error and just drop the raw response - return fmt.Errorf( - "Error making API request.\n\n"+ - "URL: %s %s\n"+ - "Code: %d. Raw Message:\n\n%s", - r.Request.Method, r.Request.URL.String(), - r.StatusCode, bodyBuf.String()) - } - - var errBody bytes.Buffer - errBody.WriteString(fmt.Sprintf( - "Error making API request.\n\n"+ - "URL: %s %s\n"+ - "Code: %d. Errors:\n\n", - r.Request.Method, r.Request.URL.String(), - r.StatusCode)) - for _, err := range resp.Errors { - errBody.WriteString(fmt.Sprintf("* %s", err)) - } - - return fmt.Errorf(errBody.String()) -} - -// ErrorResponse is the raw structure of errors when they're returned by the -// HTTP API. 
-type ErrorResponse struct { - Errors []string -} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go deleted file mode 100644 index d0a539e47..000000000 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ /dev/null @@ -1,66 +0,0 @@ -package api - -import ( - "encoding/json" - "io" - "time" -) - -// Secret is the structure returned for every secret within Vault. -type Secret struct { - LeaseID string `json:"lease_id"` - LeaseDuration int `json:"lease_duration"` - Renewable bool `json:"renewable"` - - // Data is the actual contents of the secret. The format of the data - // is arbitrary and up to the secret backend. - Data map[string]interface{} `json:"data"` - - // Warnings contains any warnings related to the operation. These - // are not issues that caused the command to fail, but that the - // client should be aware of. - Warnings []string `json:"warnings"` - - // Auth, if non-nil, means that there was authentication information - // attached to this response. - Auth *SecretAuth `json:"auth,omitempty"` - - // WrapInfo, if non-nil, means that the initial response was wrapped in the - // cubbyhole of the given token (which has a TTL of the given number of - // seconds) - WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` -} - -// SecretWrapInfo contains wrapping information if we have it. If what is -// contained is an authentication token, the accessor for the token will be -// available in WrappedAccessor. -type SecretWrapInfo struct { - Token string `json:"token"` - TTL int `json:"ttl"` - CreationTime time.Time `json:"creation_time"` - WrappedAccessor string `json:"wrapped_accessor"` -} - -// SecretAuth is the structure containing auth information if we have it. -type SecretAuth struct { - ClientToken string `json:"client_token"` - Accessor string `json:"accessor"` - Policies []string `json:"policies"` - Metadata map[string]string `json:"metadata"` - - LeaseDuration int `json:"lease_duration"` - Renewable bool `json:"renewable"` -} - -// ParseSecret is used to parse a secret value from JSON from an io.Reader. -func ParseSecret(r io.Reader) (*Secret, error) { - // First decode the JSON into a map[string]interface{} - var secret Secret - dec := json.NewDecoder(r) - dec.UseNumber() - if err := dec.Decode(&secret); err != nil { - return nil, err - } - - return &secret, nil -} diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go deleted file mode 100644 index 7c3e56bb4..000000000 --- a/vendor/github.com/hashicorp/vault/api/ssh.go +++ /dev/null @@ -1,38 +0,0 @@ -package api - -import "fmt" - -// SSH is used to return a client to invoke operations on SSH backend. -type SSH struct { - c *Client - MountPoint string -} - -// SSH returns the client for logical-backend API calls. -func (c *Client) SSH() *SSH { - return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) -} - -// SSHWithMountPoint returns the client with specific SSH mount point. -func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { - return &SSH{ - c: c, - MountPoint: mountPoint, - } -} - -// Credential invokes the SSH backend API to create a credential to establish an SSH session. 
-func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) - if err := r.SetJSONBody(data); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go deleted file mode 100644 index 2dd85d14f..000000000 --- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go +++ /dev/null @@ -1,239 +0,0 @@ -package api - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-rootcerts" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" -) - -const ( - // SSHHelperDefaultMountPoint is the default path at which SSH backend will be - // mounted in the Vault server. - SSHHelperDefaultMountPoint = "ssh" - - // VerifyEchoRequest is the echo request message sent as OTP by the helper. - VerifyEchoRequest = "verify-echo-request" - - // VerifyEchoResponse is the echo response message sent as a response to OTP - // matching echo request. - VerifyEchoResponse = "verify-echo-response" -) - -// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server -// in order to verify the OTP entered by the user. It contains the path at which -// SSH backend is mounted at the server. -type SSHHelper struct { - c *Client - MountPoint string -} - -// SSHVerifyResponse is a structure representing the fields in Vault server's -// response. -type SSHVerifyResponse struct { - // Usually empty. If the request OTP is echo request message, this will - // be set to the corresponding echo response message. - Message string `mapstructure:"message"` - - // Username associated with the OTP - Username string `mapstructure:"username"` - - // IP associated with the OTP - IP string `mapstructure:"ip"` -} - -// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. -type SSHHelperConfig struct { - VaultAddr string `hcl:"vault_addr"` - SSHMountPoint string `hcl:"ssh_mount_point"` - CACert string `hcl:"ca_cert"` - CAPath string `hcl:"ca_path"` - AllowedCidrList string `hcl:"allowed_cidr_list"` - TLSSkipVerify bool `hcl:"tls_skip_verify"` -} - -// SetTLSParameters sets the TLS parameters for this SSH agent. -func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { - tlsConfig := &tls.Config{ - InsecureSkipVerify: c.TLSSkipVerify, - MinVersion: tls.VersionTLS12, - RootCAs: certPool, - } - - transport := cleanhttp.DefaultTransport() - transport.TLSClientConfig = tlsConfig - clientConfig.HttpClient.Transport = transport -} - -// NewClient returns a new client for the configuration. This client will be used by the -// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. -// If the configuration supplies Vault SSL certificates, then the client will -// have TLS configured in its transport. -func (c *SSHHelperConfig) NewClient() (*Client, error) { - // Creating a default client configuration for communicating with vault server. - clientConfig := DefaultConfig() - - // Pointing the client to the actual address of vault server. 
- clientConfig.Address = c.VaultAddr - - // Check if certificates are provided via config file. - if c.CACert != "" || c.CAPath != "" || c.TLSSkipVerify { - rootConfig := &rootcerts.Config{ - CAFile: c.CACert, - CAPath: c.CAPath, - } - certPool, err := rootcerts.LoadCACerts(rootConfig) - if err != nil { - return nil, err - } - // Enable TLS on the HTTP client information - c.SetTLSParameters(clientConfig, certPool) - } - - // Creating the client object for the given configuration - client, err := NewClient(clientConfig) - if err != nil { - return nil, err - } - - return client, nil -} - -// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding -// in-memory structure. -// -// Vault address is a required parameter. -// Mount point defaults to "ssh". -func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { - contents, err := ioutil.ReadFile(path) - if err != nil && !os.IsNotExist(err) { - return nil, multierror.Prefix(err, "ssh_helper:") - } - return ParseSSHHelperConfig(string(contents)) -} - -// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper -// configuration. -func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { - root, err := hcl.Parse(string(contents)) - if err != nil { - return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err) - } - - list, ok := root.Node.(*ast.ObjectList) - if !ok { - return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object") - } - - valid := []string{ - "vault_addr", - "ssh_mount_point", - "ca_cert", - "ca_path", - "allowed_cidr_list", - "tls_skip_verify", - } - if err := checkHCLKeys(list, valid); err != nil { - return nil, multierror.Prefix(err, "ssh_helper:") - } - - var c SSHHelperConfig - c.SSHMountPoint = SSHHelperDefaultMountPoint - if err := hcl.DecodeObject(&c, list); err != nil { - return nil, multierror.Prefix(err, "ssh_helper:") - } - - if c.VaultAddr == "" { - return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'") - } - return &c, nil -} - -// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend -// mounted at default path ("ssh"). -func (c *Client) SSHHelper() *SSHHelper { - return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) -} - -// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend -// mounted at a specific mount point. -func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { - return &SSHHelper{ - c: c, - MountPoint: mountPoint, - } -} - -// Verify verifies if the key provided by user is present in Vault server. The response -// will contain the IP address and username associated with the OTP. In case the -// OTP matches the echo request message, instead of searching an entry for the OTP, -// an echo response message is returned. This feature is used by ssh-helper to verify if -// its configured correctly. 
-func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { - data := map[string]interface{}{ - "otp": otp, - } - verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) - r := c.c.NewRequest("PUT", verifyPath) - if err := r.SetJSONBody(data); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - - if secret.Data == nil { - return nil, nil - } - - var verifyResp SSHVerifyResponse - err = mapstructure.Decode(secret.Data, &verifyResp) - if err != nil { - return nil, err - } - return &verifyResp, nil -} - -func checkHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf( - "invalid key '%s' on line %d", key, item.Assign.Line)) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go deleted file mode 100644 index 5fb111887..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Sys is used to perform system-related operations on Vault. -type Sys struct { - c *Client -} - -// Sys is used to return the client for sys-related API calls. -func (c *Client) Sys() *Sys { - return &Sys{c: c} -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go deleted file mode 100644 index 6fbe1ef22..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_audit.go +++ /dev/null @@ -1,85 +0,0 @@ -package api - -import ( - "fmt" -) - -func (c *Sys) AuditHash(path string, input string) (string, error) { - body := map[string]interface{}{ - "input": input, - } - - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path)) - if err := r.SetJSONBody(body); err != nil { - return "", err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return "", err - } - defer resp.Body.Close() - - type d struct { - Hash string - } - - var result d - err = resp.DecodeJSON(&result) - return result.Hash, err -} - -func (c *Sys) ListAudit() (map[string]*Audit, error) { - r := c.c.NewRequest("GET", "/v1/sys/audit") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result map[string]*Audit - err = resp.DecodeJSON(&result) - return result, err -} - -func (c *Sys) EnableAudit( - path string, auditType string, desc string, opts map[string]string) error { - body := map[string]interface{}{ - "type": auditType, - "description": desc, - "options": opts, - } - - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) - if err := r.SetJSONBody(body); err != nil { - return err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (c *Sys) DisableAudit(path string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - 
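As a point of reference for the code being removed, the audit helpers above were typically driven as in the minimal sketch below. The token, the `file` audit type, its mount path, and the log file location are assumptions for illustration only, not anything this patch prescribes:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        client.SetToken("root") // assumption: a token allowed to manage audit backends

        // Enable a file audit backend at the mount path "file".
        opts := map[string]string{"file_path": "/var/log/vault_audit.log"}
        if err := client.Sys().EnableAudit("file", "file", "example audit backend", opts); err != nil {
            log.Fatal(err)
        }

        // List the enabled audit backends and print their descriptions.
        audits, err := client.Sys().ListAudit()
        if err != nil {
            log.Fatal(err)
        }
        for path, a := range audits {
            fmt.Printf("%s: type=%s description=%q\n", path, a.Type, a.Description)
        }
    }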
-
-// Structures for the requests/response are all down here. They aren't
-// individually documented because they map almost directly to the raw HTTP API
-// documentation. Please refer to that documentation for more details.
-
-type Audit struct {
- Path string
- Type string
- Description string
- Options map[string]string
-}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
deleted file mode 100644
index 8e1cdec39..000000000
--- a/vendor/github.com/hashicorp/vault/api/sys_auth.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package api
-
-import (
- "fmt"
-)
-
-func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
- r := c.c.NewRequest("GET", "/v1/sys/auth")
- resp, err := c.c.RawRequest(r)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var result map[string]*AuthMount
- err = resp.DecodeJSON(&result)
- return result, err
-}
-
-func (c *Sys) EnableAuth(path, authType, desc string) error {
- body := map[string]string{
- "type": authType,
- "description": desc,
- }
-
- r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
- if err := r.SetJSONBody(body); err != nil {
- return err
- }
-
- resp, err := c.c.RawRequest(r)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
-
- return nil
-}
-
-func (c *Sys) DisableAuth(path string) error {
- r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path))
- resp, err := c.c.RawRequest(r)
- if err == nil {
- defer resp.Body.Close()
- }
- return err
-}
-
-// Structures for the requests/response are all down here. They aren't
-// individually documented because they map almost directly to the raw HTTP API
-// documentation. Please refer to that documentation for more details.
-
-type AuthMount struct {
- Type string
- Description string
-}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
deleted file mode 100644
index e3034dded..000000000
--- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package api
-
-func (c *Sys) CapabilitiesSelf(path string) ([]string, error) {
- body := map[string]string{
- "path": path,
- }
-
- r := c.c.NewRequest("POST", "/v1/sys/capabilities-self")
- if err := r.SetJSONBody(body); err != nil {
- return nil, err
- }
-
- resp, err := c.c.RawRequest(r)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var result map[string]interface{}
- err = resp.DecodeJSON(&result)
- if err != nil {
- return nil, err
- }
- var capabilities []string
- capabilitiesRaw := result["capabilities"].([]interface{})
- for _, capability := range capabilitiesRaw {
- capabilities = append(capabilities, capability.(string))
- }
- return capabilities, nil
-}
-
-func (c *Sys) Capabilities(token, path string) ([]string, error) {
- body := map[string]string{
- "token": token,
- "path": path,
- }
-
- r := c.c.NewRequest("POST", "/v1/sys/capabilities")
- if err := r.SetJSONBody(body); err != nil {
- return nil, err
- }
-
- resp, err := c.c.RawRequest(r)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var result map[string]interface{}
- err = resp.DecodeJSON(&result)
- if err != nil {
- return nil, err
- }
- var capabilities []string
- capabilitiesRaw := result["capabilities"].([]interface{})
- for _, capability := range capabilitiesRaw {
- capabilities = append(capabilities, capability.(string))
- }
- return capabilities, nil
-}
diff --git 
a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go deleted file mode 100644 index 8dc2095f3..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go +++ /dev/null @@ -1,77 +0,0 @@ -package api - -func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result GenerateRootStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { - body := map[string]interface{}{ - "otp": otp, - "pgp_key": pgpKey, - } - - r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result GenerateRootStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) GenerateRootCancel() error { - r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { - body := map[string]interface{}{ - "key": shard, - "nonce": nonce, - } - - r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result GenerateRootStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type GenerateRootStatusResponse struct { - Nonce string - Started bool - Progress int - Required int - Complete bool - EncodedRootToken string `json:"encoded_root_token"` - PGPFingerprint string `json:"pgp_fingerprint"` -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go deleted file mode 100644 index 47e971824..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_init.go +++ /dev/null @@ -1,51 +0,0 @@ -package api - -func (c *Sys) InitStatus() (bool, error) { - r := c.c.NewRequest("GET", "/v1/sys/init") - resp, err := c.c.RawRequest(r) - if err != nil { - return false, err - } - defer resp.Body.Close() - - var result InitStatusResponse - err = resp.DecodeJSON(&result) - return result.Initialized, err -} - -func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { - r := c.c.NewRequest("PUT", "/v1/sys/init") - if err := r.SetJSONBody(opts); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result InitResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type InitRequest struct { - SecretShares int `json:"secret_shares"` - SecretThreshold int `json:"secret_threshold"` - StoredShares int `json:"stored_shares"` - PGPKeys []string `json:"pgp_keys"` - RecoveryShares int `json:"recovery_shares"` - RecoveryThreshold int `json:"recovery_threshold"` - RecoveryPGPKeys []string `json:"recovery_pgp_keys"` -} - -type InitStatusResponse struct { - Initialized bool -} - -type InitResponse struct { - Keys []string - RecoveryKeys []string `json:"recovery_keys"` - RootToken string `json:"root_token"` -} diff --git 
a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go deleted file mode 100644 index 201ac732e..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_leader.go +++ /dev/null @@ -1,20 +0,0 @@ -package api - -func (c *Sys) Leader() (*LeaderResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/leader") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result LeaderResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type LeaderResponse struct { - HAEnabled bool `json:"ha_enabled"` - IsSelf bool `json:"is_self"` - LeaderAddress string `json:"leader_address"` -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_lease.go deleted file mode 100644 index e103990d4..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_lease.go +++ /dev/null @@ -1,45 +0,0 @@ -package api - -func (c *Sys) Renew(id string, increment int) (*Secret, error) { - r := c.c.NewRequest("PUT", "/v1/sys/renew/"+id) - - body := map[string]interface{}{"increment": increment} - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - return ParseSecret(resp.Body) -} - -func (c *Sys) Revoke(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) RevokePrefix(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) RevokeForce(id string) error { - r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go deleted file mode 100644 index 623a14628..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ /dev/null @@ -1,113 +0,0 @@ -package api - -import ( - "fmt" - - "github.com/fatih/structs" -) - -func (c *Sys) ListMounts() (map[string]*MountOutput, error) { - r := c.c.NewRequest("GET", "/v1/sys/mounts") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result map[string]*MountOutput - err = resp.DecodeJSON(&result) - return result, err -} - -func (c *Sys) Mount(path string, mountInfo *MountInput) error { - body := structs.Map(mountInfo) - - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) - if err := r.SetJSONBody(body); err != nil { - return err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (c *Sys) Unmount(path string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) Remount(from, to string) error { - body := map[string]interface{}{ - "from": from, - "to": to, - } - - r := c.c.NewRequest("POST", "/v1/sys/remount") - if err := r.SetJSONBody(body); err != nil { - return err - } - - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) TuneMount(path string, config MountConfigInput) error { - 
body := structs.Map(config) - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) - if err := r.SetJSONBody(body); err != nil { - return err - } - - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var result MountConfigOutput - err = resp.DecodeJSON(&result) - return &result, err -} - -type MountInput struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Config MountConfigInput `json:"config" structs:"config"` -} - -type MountConfigInput struct { - DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` -} - -type MountOutput struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Config MountConfigOutput `json:"config" structs:"config"` -} - -type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go deleted file mode 100644 index 5f013eb5e..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_policy.go +++ /dev/null @@ -1,72 +0,0 @@ -package api - -import ( - "fmt" -) - -func (c *Sys) ListPolicies() ([]string, error) { - r := c.c.NewRequest("GET", "/v1/sys/policy") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result listPoliciesResp - err = resp.DecodeJSON(&result) - return result.Policies, err -} - -func (c *Sys) GetPolicy(name string) (string, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name)) - resp, err := c.c.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - if resp.StatusCode == 404 { - return "", nil - } - } - if err != nil { - return "", err - } - - var result getPoliciesResp - err = resp.DecodeJSON(&result) - return result.Rules, err -} - -func (c *Sys) PutPolicy(name, rules string) error { - body := map[string]string{ - "rules": rules, - } - - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policy/%s", name)) - if err := r.SetJSONBody(body); err != nil { - return err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (c *Sys) DeletePolicy(name string) error { - r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name)) - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -type getPoliciesResp struct { - Rules string `json:"rules"` -} - -type listPoliciesResp struct { - Policies []string `json:"policies"` -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go deleted file mode 100644 index 4fbfbb9fc..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_rekey.go +++ /dev/null @@ -1,200 +0,0 @@ -package api - -func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { - r := c.c.NewRequest("GET", 
"/v1/sys/rekey/init") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { - r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") - if err := r.SetJSONBody(config); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { - r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init") - if err := r.SetJSONBody(config); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyCancel() error { - r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) RekeyRecoveryKeyCancel() error { - r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { - body := map[string]interface{}{ - "key": shard, - "nonce": nonce, - } - - r := c.c.NewRequest("PUT", "/v1/sys/rekey/update") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyUpdateResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { - body := map[string]interface{}{ - "key": shard, - "nonce": nonce, - } - - r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyUpdateResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyRetrieveResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result RekeyRetrieveResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -func (c *Sys) RekeyDeleteBackup() error { - r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") - resp, err := c.c.RawRequest(r) - if err == nil { - defer 
resp.Body.Close() - } - - return err -} - -func (c *Sys) RekeyDeleteRecoveryBackup() error { - r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - - return err -} - -type RekeyInitRequest struct { - SecretShares int `json:"secret_shares"` - SecretThreshold int `json:"secret_threshold"` - PGPKeys []string `json:"pgp_keys"` - Backup bool -} - -type RekeyStatusResponse struct { - Nonce string - Started bool - T int - N int - Progress int - Required int - PGPFingerprints []string `json:"pgp_fingerprints"` - Backup bool -} - -type RekeyUpdateResponse struct { - Nonce string - Complete bool - Keys []string - PGPFingerprints []string `json:"pgp_fingerprints"` - Backup bool -} - -type RekeyRetrieveResponse struct { - Nonce string - Keys map[string][]string -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go deleted file mode 100644 index 22f71d78a..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_rotate.go +++ /dev/null @@ -1,30 +0,0 @@ -package api - -import "time" - -func (c *Sys) Rotate() error { - r := c.c.NewRequest("POST", "/v1/sys/rotate") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) KeyStatus() (*KeyStatus, error) { - r := c.c.NewRequest("GET", "/v1/sys/key-status") - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - result := new(KeyStatus) - err = resp.DecodeJSON(result) - return result, err -} - -type KeyStatus struct { - Term int - InstallTime time.Time `json:"install_time"` -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go deleted file mode 100644 index 2a5ad4be2..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_seal.go +++ /dev/null @@ -1,56 +0,0 @@ -package api - -func (c *Sys) SealStatus() (*SealStatusResponse, error) { - r := c.c.NewRequest("GET", "/v1/sys/seal-status") - return sealStatusRequest(c, r) -} - -func (c *Sys) Seal() error { - r := c.c.NewRequest("PUT", "/v1/sys/seal") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { - body := map[string]interface{}{"reset": true} - - r := c.c.NewRequest("PUT", "/v1/sys/unseal") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - return sealStatusRequest(c, r) -} - -func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { - body := map[string]interface{}{"key": shard} - - r := c.c.NewRequest("PUT", "/v1/sys/unseal") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - return sealStatusRequest(c, r) -} - -func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { - resp, err := c.c.RawRequest(r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result SealStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type SealStatusResponse struct { - Sealed bool - T int - N int - Progress int -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go deleted file mode 100644 index 421e5f19f..000000000 --- a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go +++ /dev/null @@ -1,10 +0,0 @@ -package api - -func (c *Sys) StepDown() error { - r := c.c.NewRequest("PUT", "/v1/sys/step-down") - 
resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() - } - return err -} diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index b0d98d81d..000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,104 +0,0 @@ -## Changelog - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - Thanks to @mgurov for the fix. - -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. 
(@gerbenjacobs)
-
-### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
-
- * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
-
-### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
-
- * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
-
-### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
-
- * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
- * Add clickable links to README
-
-### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
-
- * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
-   [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
-
-### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
-
- * Added support for single and multi-line comments (reading, writing and updating)
- * The order of keys is now preserved
- * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
- * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
- * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
-
-### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
-
- * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
-
-### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
-
- * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
-
-### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
-
- * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
- * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
-
-### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
-
-* Added support for time.Duration
-* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
-* Changed default of MustXXX() failure from panic to log.Fatal
-
-### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar
2014 - -* Added MustGet... functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE deleted file mode 100644 index 7eab43b6b..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -goproperties - properties file decoder for Go - -Copyright (c) 2013-2014 - Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index 258fbb2b5..000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,100 +0,0 @@ -Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties) -======== - -#### Current version: 1.7.4 - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. - -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. 
See the package documentation for
-details.
-
-Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
-
-Getting Started
----------------
-
-```go
-import (
-	"flag"
-	"log"
-	"time"
-
-	"github.com/magiconair/properties"
-)
-
-func main() {
-	// init from a file
-	p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
-
-	// or multiple files
-	p = properties.MustLoadFiles([]string{
-		"${HOME}/config.properties",
-		"${HOME}/config-${USER}.properties",
-	}, properties.UTF8, true)
-
-	// or from a map
-	p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
-
-	// or from a string
-	p = properties.MustLoadString("key=value\nabc=def")
-
-	// or from a URL
-	p = properties.MustLoadURL("http://host/path")
-
-	// or from multiple URLs
-	p = properties.MustLoadURLs([]string{
-		"http://host/config",
-		"http://host/config-${USER}",
-	}, true)
-
-	// or from flags
-	p.MustFlag(flag.CommandLine)
-
-	// get values through getters
-	host := p.MustGetString("host")
-	port := p.GetInt("port", 8080)
-
-	// or through Decode
-	type Config struct {
-		Host    string        `properties:"host"`
-		Port    int           `properties:"port,default=9000"`
-		Accept  []string      `properties:"accept,default=image/png;image;gif"`
-		Timeout time.Duration `properties:"timeout,default=5s"`
-	}
-	var cfg Config
-	if err := p.Decode(&cfg); err != nil {
-		log.Fatal(err)
-	}
-}
-
-```
-
-Installation and Upgrade
-------------------------
-
-```
-$ go get -u github.com/magiconair/properties
-```
-
-License
--------
-
-2-clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
-
-ToDo
-----
-* Dump contents with passwords and secrets obscured
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
deleted file mode 100644
index 0a961bb04..000000000
--- a/vendor/github.com/magiconair/properties/decode.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 Frank Schroeder. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package properties
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Decode assigns property values to exported fields of a struct.
-//
-// Decode traverses v recursively and returns an error if a value cannot be
-// converted to the field type or a required value is missing for a field.
-//
-// The following type dependent decodings are used:
-//
-// String, boolean, numeric fields have the value of the property key assigned.
-// The property key name is the name of the field. A different key and a default
-// value can be set in the field's tag. Fields without default value are
-// required. If the value cannot be converted to the field type an error is
-// returned.
-//
-// time.Duration fields have the result of time.ParseDuration() assigned.
-//
-// time.Time fields have the value of time.Parse() assigned. The default layout
-// is time.RFC3339 but can be set in the field's tag.
-//
-// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
-// fields have the value interpreted as a comma separated list of values. The
-// individual values are trimmed of whitespace and empty values are ignored. A
-// default value can be provided as a semicolon separated list in the field's
-// tag.
-//
-// Struct fields are decoded recursively using the field name plus "." as
-// prefix. The prefix (without dot) can be overridden in the field's tag.
-// Default values are not supported in the field's tag. Specify them on the
-// fields of the inner struct instead.
-//
-// Map fields must have a key of type string and are decoded recursively by
-// using the field's name plus "." as prefix and the next element of the key
-// name as map key. The prefix (without dot) can be overridden in the field's
-// tag. Default values are not supported.
-//
-// Examples:
-//
-//     // Field is ignored.
-//     Field int `properties:"-"`
-//
-//     // Field is assigned value of 'Field'.
-//     Field int
-//
-//     // Field is assigned value of 'myName'.
-//     Field int `properties:"myName"`
-//
-//     // Field is assigned value of key 'myName' and has a default
-//     // value 15 if the key does not exist.
-//     Field int `properties:"myName,default=15"`
-//
-//     // Field is assigned value of key 'Field' and has a default
-//     // value 15 if the key does not exist.
-//     Field int `properties:",default=15"`
-//
-//     // Field is assigned value of key 'date' and the date
-//     // is in format 2006-01-02
-//     Field time.Time `properties:"date,layout=2006-01-02"`
-//
-//     // Field is assigned the non-empty and whitespace trimmed
-//     // values of key 'Field' split by commas.
-//     Field []string
-//
-//     // Field is assigned the non-empty and whitespace trimmed
-//     // values of key 'Field' split by commas and has a default
-//     // value ["a", "b", "c"] if the key does not exist.
-//     Field []string `properties:",default=a;b;c"`
-//
-//     // Field is decoded recursively with "Field." as key prefix.
-//     Field SomeStruct
-//
-//     // Field is decoded recursively with "myName." as key prefix.
-//     Field SomeStruct `properties:"myName"`
-//
-//     // Field is decoded recursively with "Field." as key prefix
-//     // and the next dotted element of the key as map key.
-//     Field map[string]string
-//
-//     // Field is decoded recursively with "myName." as key prefix
-//     // and the next dotted element of the key as map key.
-//     Field map[string]string `properties:"myName"`
-func (p *Properties) Decode(x interface{}) error {
-	t, v := reflect.TypeOf(x), reflect.ValueOf(x)
-	if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
-		return fmt.Errorf("not a pointer to struct: %s", t)
-	}
-	if err := dec(p, "", nil, nil, v); err != nil {
-		return err
-	}
-	return nil
-}
-
-func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
-	t := v.Type()
-
-	// value returns the property value for key or the default if provided.
-	value := func() (string, error) {
-		if val, ok := p.Get(key); ok {
-			return val, nil
-		}
-		if def != nil {
-			return *def, nil
-		}
-		return "", fmt.Errorf("missing required key %s", key)
-	}
-
-	// conv converts a string to a value of the given type.
- conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." + fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." 
-func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 36c836808..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. -// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. 
-// This can be used for sanitizing properties files.
-//
-// Property expansion is recursive and circular references
-// and malformed expressions are not allowed and cause an
-// error. Expansion of environment variables is supported.
-//
-//   # standard property
-//   key = value
-//
-//   # property expansion: key2 = value
-//   key2 = ${key}
-//
-//   # recursive expansion: key3 = value
-//   key3 = ${key2}
-//
-//   # circular reference (error)
-//   key = ${key}
-//
-//   # malformed expression (error)
-//   key = ${ke
-//
-//   # refers to the users' home dir
-//   home = ${HOME}
-//
-//   # local key takes precedence over env var: u = foo
-//   USER = foo
-//   u = ${USER}
-//
-// The default property expansion format is ${key} but can be
-// changed by setting different pre- and postfix values on the
-// Properties object.
-//
-//   p := properties.NewProperties()
-//   p.Prefix = "#["
-//   p.Postfix = "]#"
-//
-// Properties provides convenience functions for getting typed
-// values with default values if the key does not exist or the
-// type conversion failed.
-//
-//   # Returns true if the value is either "1", "on", "yes" or "true"
-//   # Returns false for every other value and the default value if
-//   # the key does not exist.
-//   v = p.GetBool("key", false)
-//
-//   # Returns the value if the key exists and the format conversion
-//   # was successful. Otherwise, the default value is returned.
-//   v = p.GetInt64("key", 999)
-//   v = p.GetUint64("key", 999)
-//   v = p.GetFloat64("key", 123.0)
-//   v = p.GetString("key", "def")
-//   v = p.GetDuration("key", 999)
-//
-// As an alternative properties may be applied with the standard
-// library's flag implementation at any time.
-//
-//   # Standard configuration
-//   v = flag.Int("key", 999, "help message")
-//   flag.Parse()
-//
-//   # Merge p into the flag set
-//   p.MustFlag(flag.CommandLine)
-//
-// Properties provides several MustXXX() convenience functions
-// which will terminate the app if an error occurs. The behavior
-// of the failure is configurable and the default is to call
-// log.Fatal(err). To have the MustXXX() functions panic instead
-// of logging the error set a different ErrorHandler before
-// you use the Properties package.
-//
-//   properties.ErrorHandler = properties.PanicHandler
-//
-//   # Will panic instead of logging an error
-//   p := properties.MustLoadFile("config.properties")
-//
-// You can also provide your own ErrorHandler function. The only requirement
-// is that the error handler function must exit after handling the error.
-//
-//   properties.ErrorHandler = func(err error) {
-//     fmt.Println(err)
-//     os.Exit(1)
-//   }
-//
-//   # Will write to stdout and then exit
-//   p := properties.MustLoadFile("config.properties")
-//
-// Properties can also be loaded into a struct via the `Decode`
-// method, e.g.
-//
-//   type S struct {
-//     A string        `properties:"a,default=foo"`
-//     D time.Duration `properties:"timeout,default=5s"`
-//     E time.Time     `properties:"expires,layout=2006-01-02,default=2015-01-01"`
-//   }
-//
-// See `Decode()` method for the full documentation.
-//
-// The following documents provide a description of the properties
-// file format.
-//
-// http://en.wikipedia.org/wiki/.properties
-//
-// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
-//
-package properties
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
deleted file mode 100644
index 0d775e035..000000000
--- a/vendor/github.com/magiconair/properties/integrate.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 Frank Schroeder. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package properties
-
-import "flag"
-
-// MustFlag sets flags that are skipped by dst.Parse when p contains
-// the respective key for flag.Flag.Name.
-//
-// Its use is recommended with command line arguments as in:
-//   flag.Parse()
-//   p.MustFlag(flag.CommandLine)
-func (p *Properties) MustFlag(dst *flag.FlagSet) {
-	m := make(map[string]*flag.Flag)
-	dst.VisitAll(func(f *flag.Flag) {
-		m[f.Name] = f
-	})
-	dst.Visit(func(f *flag.Flag) {
-		delete(m, f.Name) // overridden
-	})
-
-	for name, f := range m {
-		v, ok := p.Get(name)
-		if !ok {
-			continue
-		}
-
-		if err := f.Value.Set(v); err != nil {
-			ErrorHandler(err)
-		}
-	}
-}
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
deleted file mode 100644
index c63fcc60d..000000000
--- a/vendor/github.com/magiconair/properties/lex.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2017 Frank Schroeder. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// Parts of the lexer are from the template/text/parser package
-// For these parts the following applies:
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file of the go 1.2
-// distribution.
-
-package properties
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
-	typ itemType // The type of this item.
-	pos int      // The starting position, in bytes, of this item in the input string.
-	val string   // The value of this item.
-}
-
-func (i item) String() string {
-	switch {
-	case i.typ == itemEOF:
-		return "EOF"
-	case i.typ == itemError:
-		return i.val
-	case len(i.val) > 10:
-		return fmt.Sprintf("%.10q...", i.val)
-	}
-	return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
-	itemError itemType = iota // error occurred; value is text of error
-	itemEOF
-	itemKey     // a key
-	itemValue   // a value
-	itemComment // a comment
-)
-
-// defines a constant for EOF
-const eof = -1
-
-// permitted whitespace characters space, FF and TAB
-const whitespace = " \f\t"
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
-	input   string    // the string being scanned
-	state   stateFn   // the next lexing function to enter
-	pos     int       // current position in the input
-	start   int       // start position of this item
-	width   int       // width of last rune read from input
-	lastPos int       // position of most recent item returned by nextItem
-	runes   []rune    // scanned runes for this item
-	items   chan item // channel of scanned items
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// acceptRunUntil consumes a run of runes up to a terminator. -func (l *lexer) acceptRunUntil(term rune) { - for term != l.next() { - } - l.backup() -} - -// hasText returns true if the current parsed text is not empty. -func (l *lexer) isNotEmpty() bool { - return l.pos > l.start -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. -func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. 
-func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. -func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. 
-func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 278cc2ea0..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // UTF8 interprets the input data as UTF-8. - UTF8 Encoding = 1 << iota - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - return loadBuf(buf, enc) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - return loadBuf([]byte(s), UTF8) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. -func LoadFile(filename string, enc Encoding) (*Properties, error) { - return loadAll([]string{filename}, enc, false) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(filenames, enc, ignoreMissing) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func LoadURL(url string) (*Properties, error) { - return loadAll([]string{url}, UTF8, false) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code will -// not be reported as error. See LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - return loadAll(urls, UTF8, ignoreMissing) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. 
For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(names, enc, ignoreMissing) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func loadBuf(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - return p, p.check() -} - -func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - result := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - var p *Properties - if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") { - p, err = loadURL(n, ignoreMissing) - } else { - p, err = loadFile(n, enc, ignoreMissing) - } - if err != nil { - return nil, err - } - result.Merge(p) - - } - return result, result.check() -} - -func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if ignoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - p, err := parse(convert(data, enc)) - if err != nil { - return nil, err - } - return p, nil -} - -func loadURL(url string, ignoreMissing bool) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - if resp.StatusCode == 404 && ignoreMissing { - LogPrintf("properties: %s returned %d. 
skipping", url, resp.StatusCode) - return NewProperties(), nil - } - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - if err = resp.Body.Close(); err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": - enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - p, err := parse(convert(body, enc)) - if err != nil { - return nil, err - } - return p, nil -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. -// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, make(map[string]bool), "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index 90f555cb9..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expect(expected itemType) (token item) { - token = p.lex.nextItem() - if token.typ != expected { - p.unexpected(token) - } - return token -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } - return -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index 85bb18618..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,811 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. - -import ( - "fmt" - "io" - "log" - "os" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. 
-func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. -func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. 
The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. 
-func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as an uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as an int if the key exists. 
-// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. -func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. 
-// contains the previous value. If the value contains a
-// circular reference or a malformed expression then
-// an error is returned.
-// An empty key is silently ignored.
-func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
-	if key == "" {
-		return "", false, nil
-	}
-
-	// if expansion is disabled we allow circular references
-	if p.DisableExpansion {
-		prev, ok = p.Get(key)
-		p.m[key] = value
-		if !ok {
-			p.k = append(p.k, key)
-		}
-		return prev, ok, nil
-	}
-
-	// to check for a circular reference we temporarily need
-	// to set the new value. If there is an error then revert
-	// to the previous state. Only if all tests are successful
-	// then we add the key to the p.k list.
-	prev, ok = p.Get(key)
-	p.m[key] = value
-
-	// now check for a circular reference
-	_, err = p.expand(value)
-	if err != nil {
-
-		// revert to the previous state
-		if ok {
-			p.m[key] = prev
-		} else {
-			delete(p.m, key)
-		}
-
-		return "", false, err
-	}
-
-	if !ok {
-		p.k = append(p.k, key)
-	}
-
-	return prev, ok, nil
-}
-
-// SetValue sets property key to the default string value
-// as defined by fmt.Sprintf("%v").
-func (p *Properties) SetValue(key string, value interface{}) error {
-	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
-	return err
-}
-
-// MustSet sets the property key to the corresponding value.
-// If a value for key existed before then ok is true and prev
-// contains the previous value. An empty key is silently ignored.
-func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
-	prev, ok, err := p.Set(key, value)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return prev, ok
-}
-
-// String returns a string of all expanded 'key = value' pairs.
-func (p *Properties) String() string {
-	var s string
-	for _, key := range p.k {
-		value, _ := p.Get(key)
-		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
-	}
-	return s
-}
-
-// Write writes all unexpanded 'key = value' pairs to the given writer.
-// Write returns the number of bytes written and any write error encountered.
-func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
-	return p.WriteComment(w, "", enc)
-}
-
-// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
-// If prefix is not empty then comments are written with a blank line and the
-// given prefix. The prefix should be either "# " or "! " to be compatible with
-// the properties file format. Otherwise, the properties parser will not be
-// able to read the file back in. It returns the number of bytes written and
-// any write error encountered.
-func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
-	var x int
-
-	for _, key := range p.k {
-		value := p.m[key]
-
-		if prefix != "" {
-			if comments, ok := p.c[key]; ok {
-				// don't print comments if they are all empty
-				allEmpty := true
-				for _, c := range comments {
-					if c != "" {
-						allEmpty = false
-						break
-					}
-				}
-
-				if !allEmpty {
-					// add a blank line between entries but not at the top
-					if len(comments) > 0 && n > 0 {
-						x, err = fmt.Fprintln(w)
-						if err != nil {
-							return
-						}
-						n += x
-					}
-
-					for _, c := range comments {
-						x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
-						if err != nil {
-							return
-						}
-						n += x
-					}
-				}
-			}
-		}
-
-		x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
-		if err != nil {
-			return
-		}
-		n += x
-	}
-	return
-}
-
-// Map returns a copy of the properties as a map.
-func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. -func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } - -outer: - for _, otherKey := range other.k { - for _, key := range p.k { - if otherKey == key { - continue outer - } - } - p.k = append(p.k, otherKey) - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found. -func (p *Properties) check() error { - for _, value := range p.m { - if _, err := p.expand(value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. -func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - if _, ok := keys[key]; ok { - return "", fmt.Errorf("circular reference") - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - - // remember that we've seen the key - keys[key] = true - - return expand(s[:start]+val+s[end+1:], keys, prefix, postfix, values) -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
-func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index 2e907d540..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. -func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b..000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 8996b0221..000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,137 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. 
-func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -func dirUnix() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // If that fails, try getent - var stdout bytes.Buffer - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If "getent" is missing, ignore it - if err == exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd = exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - if home == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 659d6885f..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index aa91f76ce..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,151 +0,0 @@ -package mapstructure - -import ( - "errors" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. 
-func DecodeHookExec(
-	raw DecodeHookFunc,
-	from reflect.Type, to reflect.Type,
-	data interface{}) (interface{}, error) {
-	// Build our arguments that reflect expects
-	argVals := make([]reflect.Value, 3)
-	argVals[0] = reflect.ValueOf(from)
-	argVals[1] = reflect.ValueOf(to)
-	argVals[2] = reflect.ValueOf(data)
-
-	switch f := typedDecodeHook(raw).(type) {
-	case DecodeHookFuncType:
-		return f(from, to, data)
-	case DecodeHookFuncKind:
-		return f(from.Kind(), to.Kind(), data)
-	default:
-		return nil, errors.New("invalid decode hook signature")
-	}
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		var err error
-		for _, f1 := range fs {
-			data, err = DecodeHookExec(f1, f, t, data)
-			if err != nil {
-				return nil, err
-			}
-
-			// Modify the from kind to be correct with the new data
-			f = reflect.ValueOf(data).Type()
-		}
-
-		return data, nil
-	}
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
-	return func(
-		f reflect.Kind,
-		t reflect.Kind,
-		data interface{}) (interface{}, error) {
-		if f != reflect.String || t != reflect.Slice {
-			return data, nil
-		}
-
-		raw := data.(string)
-		if raw == "" {
-			return []string{}, nil
-		}
-
-		return strings.Split(raw, sep), nil
-	}
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(time.Duration(5)) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		return time.ParseDuration(data.(string))
-	}
-}
-
-func WeaklyTypedHook(
-	f reflect.Kind,
-	t reflect.Kind,
-	data interface{}) (interface{}, error) {
-	dataVal := reflect.ValueOf(data)
-	switch t {
-	case reflect.String:
-		switch f {
-		case reflect.Bool:
-			if dataVal.Bool() {
-				return "1", nil
-			} else {
-				return "0", nil
-			}
-		case reflect.Float32:
-			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
-		case reflect.Int:
-			return strconv.FormatInt(dataVal.Int(), 10), nil
-		case reflect.Slice:
-			dataType := dataVal.Type()
-			elemKind := dataType.Elem().Kind()
-			if elemKind == reflect.Uint8 {
-				return string(dataVal.Interface().([]uint8)), nil
-			}
-		case reflect.Uint:
-			return strconv.FormatUint(dataVal.Uint(), 10), nil
-		}
-	}
-
-	return data, nil
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5af..000000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Error implements the error interface and can represent multiple
-// errors that occur in the course of a single decode.
-type Error struct {
-	Errors []string
-}
-
-func (e *Error) Error() string {
-	points := make([]string, len(e.Errors))
-	for i, err := range e.Errors {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	sort.Strings(points)
-	return fmt.Sprintf(
-		"%d error(s) decoding:\n\n%s",
-		len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
-	if e == nil {
-		return nil
-	}
-
-	result := make([]error, len(e.Errors))
-	for i, e := range e.Errors {
-		result[i] = errors.New(e)
-	}
-
-	return result
-}
-
-func appendErrors(errors []string, err error) []string {
-	switch e := err.(type) {
-	case *Error:
-		return append(errors, e.Errors...)
-	default:
-		return append(errors, e.Error())
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index a367a95b6..000000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,767 +0,0 @@
-// The mapstructure package exposes functionality to convert an
-// arbitrary map[string]interface{} into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type should be DecodeHookFuncType or DecodeHookFuncKind.
-// Either is accepted. Types are a superset of Kinds (Types can return
-// Kinds) and are generally a richer thing to use, but Kinds are simpler
-// if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
-	// DecodeHook, if set, will be called before any decoding and any
-	// type conversion (if WeaklyTypedInput is on). This lets you modify
-	// the values before they're set down onto the resulting struct.
-	//
-	// If an error is returned, the entire decode will fail with that
-	// error.
-	DecodeHook DecodeHookFunc
-
-	// If ErrorUnused is true, then it is an error for there to exist
-	// keys in the original map that were unused in the decoding process
-	// (extra keys).
-	ErrorUnused bool
-
-	// ZeroFields, if set to true, will zero fields before writing them.
-	// For example, a map will be emptied before decoded values are put in
-	// it. If this is false, a map will be merged.
- ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes a map and uses reflection to convert it into the -// given Go native structure. val must be a pointer to a struct. -func Decode(m interface{}, rawVal interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: rawVal, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. 
-func (d *Decoder) Decode(raw interface{}) error { - return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { - if data == nil { - // If the data is nil, then we don't set anything. - return nil - } - - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - // If the data value is invalid, then we just set the value - // to be the zero value. - val.Set(reflect.Zero(val.Type())) - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the data. - var err error - data, err = DecodeHookExec( - d.config.DecodeHook, - dataVal.Type(), val.Type(), data) - if err != nil { - return err - } - } - - var err error - dataKind := getKind(val) - switch dataKind { - case reflect.Bool: - err = d.decodeBool(name, data, val) - case reflect.Interface: - err = d.decodeBasic(name, data, val) - case reflect.String: - err = d.decodeString(name, data, val) - case reflect.Int: - err = d.decodeInt(name, data, val) - case reflect.Uint: - err = d.decodeUint(name, data, val) - case reflect.Float32: - err = d.decodeFloat(name, data, val) - case reflect.Struct: - err = d.decodeStruct(name, data, val) - case reflect.Map: - err = d.decodeMap(name, data, val) - case reflect.Ptr: - err = d.decodePtr(name, data, val) - case reflect.Slice: - err = d.decodeSlice(name, data, val) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, dataKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. 
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - 
name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(float64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if dataVal.Kind() != reflect.Map { - // In weak mode, we accept a slice of maps as an input... 
- if d.config.WeaklyTypedInput { - switch dataVal.Kind() { - case reflect.Array, reflect.Slice: - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - fmt.Sprintf("%s[%d]", name, i), - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil - } - } - - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } - - // Accumulate errors - errors := make([]string, 0) - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - realVal := reflect.New(valElemType) - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - } - - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. 
- if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } - - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() - - if fieldType.Anonymous { - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - } - - // If "squash" is specified in the tag, we squash the field down. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) - } else { - structs = append(structs, val.FieldByName(fieldType.Name)) - } - continue - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - for fieldType, field := range fields { - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey, _ := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. - continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. 
- if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/pascaldekloe/goe/verify/values.go b/vendor/github.com/pascaldekloe/goe/verify/values.go deleted file mode 100644 index e214033c3..000000000 --- a/vendor/github.com/pascaldekloe/goe/verify/values.go +++ /dev/null @@ -1,168 +0,0 @@ -package verify - -import ( - "fmt" - "reflect" - "strings" - "testing" -) - -// Values verifies that got has all the content, and only the content, defined by want. -func Values(t *testing.T, name string, got, want interface{}) (ok bool) { - tr := travel{} - tr.values(reflect.ValueOf(got), reflect.ValueOf(want), nil) - - fail := tr.report(name) - if fail != "" { - t.Error(fail) - return false - } - - return true -} - -func (t *travel) values(got, want reflect.Value, path []*segment) { - if !want.IsValid() { - if got.IsValid() { - t.differ(path, "Unwanted %s", got.Type()) - } - return - } - if !got.IsValid() { - t.differ(path, "Missing %s", want.Type()) - return - } - - if got.Type() != want.Type() { - t.differ(path, "Got type %s, want %s", got.Type(), want.Type()) - return - } - - switch got.Kind() { - - case reflect.Struct: - seg := &segment{format: "/%s"} - path = append(path, seg) - for i, n := 0, got.NumField(); i < n; i++ { - seg.x = got.Type().Field(i).Name - t.values(got.Field(i), want.Field(i), path) - } - path = path[:len(path)-1] - - case reflect.Slice, reflect.Array: - n := got.Len() - if n != want.Len() { - t.differ(path, "Got %d elements, want %d", n, want.Len()) - return - } - - seg := &segment{format: "[%d]"} - path = append(path, seg) - for i := 0; i < n; i++ { - seg.x = i - t.values(got.Index(i), want.Index(i), path) - } - path = path[:len(path)-1] - - case reflect.Ptr, reflect.Interface: - t.values(got.Elem(), want.Elem(), path) - - case reflect.Map: - seg := &segment{} - path = append(path, seg) - for _, key := range want.MapKeys() { - applyKeySeg(seg, key) - t.values(got.MapIndex(key), want.MapIndex(key), path) - } - - for _, key := range got.MapKeys() { - v := want.MapIndex(key) - if v.IsValid() { - continue - } - applyKeySeg(seg, key) - t.values(got.MapIndex(key), v, path) - } - path = path[:len(path)-1] - - case reflect.Func: - t.differ(path, "Can't compare functions") - - default: - a, b := asInterface(got), asInterface(want) - if a != b { - 
t.differ(path, differMsg(a, b))
-		}
-
-	}
-}
-
-func applyKeySeg(dst *segment, key reflect.Value) {
-	if key.Kind() == reflect.String {
-		dst.format = "[%q]"
-	} else {
-		dst.format = "[%v]"
-	}
-	dst.x = asInterface(key)
-}
-
-func asInterface(v reflect.Value) interface{} {
-	switch v.Kind() {
-	case reflect.Bool:
-		return v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int()
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return v.Uint()
-	case reflect.Float32, reflect.Float64:
-		return v.Float()
-	case reflect.Complex64, reflect.Complex128:
-		return v.Complex()
-	case reflect.String:
-		return v.String()
-	default:
-		return v.Interface()
-	}
-}
-
-func differMsg(got, want interface{}) string {
-	switch got.(type) {
-	case int64:
-		g, w := got.(int64), want.(int64)
-		if g < 0xA && g > -0xA && w < 0xA && w > -0xA {
-			return fmt.Sprintf("Got %d, want %d", got, want)
-		}
-		return fmt.Sprintf("Got %d (0x%x), want %d (0x%x)", got, got, want, want)
-	case uint64:
-		g, w := got.(uint64), want.(uint64)
-		if g < 0xA && w < 0xA {
-			return fmt.Sprintf("Got %d, want %d", got, want)
-		}
-		return fmt.Sprintf("Got %d (0x%x), want %d (0x%x)", got, got, want, want)
-	case float64, complex128:
-		return fmt.Sprintf("Got %f (%e), want %f (%e)", got, got, want, want)
-	case string:
-		a, b := got.(string), want.(string)
-		if len(a) > len(b) {
-			a, b = b, a
-		}
-		r := strings.NewReader(b)
-
-		var differAt int
-		for i, c := range a {
-			o, _, _ := r.ReadRune()
-			if c != o {
-				differAt = i
-				break
-			}
-		}
-
-		format := "Got %q, want %q"
-		if differAt > 0 {
-			format += fmt.Sprintf("\n %s^", strings.Repeat(" ", differAt))
-		}
-		return fmt.Sprintf(format, got, want)
-	default:
-		return fmt.Sprintf("Got %v, want %v", got, want)
-	}
-}
diff --git a/vendor/github.com/pascaldekloe/goe/verify/verify.go b/vendor/github.com/pascaldekloe/goe/verify/verify.go
deleted file mode 100644
index d98c014fc..000000000
--- a/vendor/github.com/pascaldekloe/goe/verify/verify.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Package verify offers convenience routines for content verification.
-package verify
-
-import (
-	"bytes"
-	"fmt"
-	"path"
-	"runtime"
-	"strings"
-)
-
-// travel is the verification state
-type travel struct {
-	diffs []differ
-}
-
-// differ is a verification failure.
-type differ struct {
-	// path is the expression to the content.
-	path string
-	// msg has a reason.
-	msg string
-}
-
-// segment is a differ.path component used for lazy formatting.
-type segment struct { - format string - x interface{} -} - -func (t *travel) differ(path []*segment, msg string, args ...interface{}) { - var buf bytes.Buffer - for _, s := range path { - buf.WriteString(fmt.Sprintf(s.format, s.x)) - } - - t.diffs = append(t.diffs, differ{ - msg: fmt.Sprintf(msg, args...), - path: buf.String(), - }) -} - -func (t *travel) report(name string) string { - if len(t.diffs) == 0 { - return "" - } - - var buf bytes.Buffer - - buf.WriteString("verification for ") - buf.WriteString(name) - if _, file, lineno, ok := runtime.Caller(2); ok { - fmt.Fprintf(&buf, " at %s:%d", path.Base(file), lineno) - } - buf.WriteByte(':') - - for _, d := range t.diffs { - buf.WriteByte('\n') - if d.path != "" { - buf.WriteString(d.path) - buf.WriteString(": ") - } - lines := strings.Split(d.msg, "\n") - buf.WriteString(lines[0]) - for _, l := range lines[1:] { - buf.WriteByte('\n') - buf.WriteString(strings.Repeat(" ", len(d.path)+2)) - buf.WriteString(l) - } - } - - return buf.String() -} diff --git a/vendor/github.com/pkg/profile/AUTHORS b/vendor/github.com/pkg/profile/AUTHORS deleted file mode 100644 index 00441d354..000000000 --- a/vendor/github.com/pkg/profile/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Dave Cheney diff --git a/vendor/github.com/pkg/profile/LICENSE b/vendor/github.com/pkg/profile/LICENSE deleted file mode 100644 index f747a8411..000000000 --- a/vendor/github.com/pkg/profile/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Dave Cheney. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/profile/README.md b/vendor/github.com/pkg/profile/README.md deleted file mode 100644 index 37bfa58c5..000000000 --- a/vendor/github.com/pkg/profile/README.md +++ /dev/null @@ -1,54 +0,0 @@ -profile -======= - -Simple profiling support package for Go - -[![Build Status](https://travis-ci.org/pkg/profile.svg?branch=master)](https://travis-ci.org/pkg/profile) [![GoDoc](http://godoc.org/github.com/pkg/profile?status.svg)](http://godoc.org/github.com/pkg/profile) - - -installation ------------- - - go get github.com/pkg/profile - -usage ------ - -Enabling profiling in your application is as simple as one line at the top of your main function - -```go -import "github.com/pkg/profile" - -func main() { - defer profile.Start().Stop() - ... 
-} -``` - -options -------- - -What to profile is controlled by config value passed to profile.Start. -By default CPU profiling is enabled. - -```go -import "github.com/pkg/profile" - -func main() { - // p.Stop() must be called before the program exits to - // ensure profiling information is written to disk. - p := profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook) - ... -} -``` - -Several convenience package level values are provided for cpu, memory, and block (contention) profiling. - -For more complex options, consult the [documentation](http://godoc.org/github.com/pkg/profile). - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a change, please discuss it first by raising an issue. diff --git a/vendor/github.com/pkg/profile/mutex.go b/vendor/github.com/pkg/profile/mutex.go deleted file mode 100644 index e69c5b44d..000000000 --- a/vendor/github.com/pkg/profile/mutex.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.8 - -package profile - -import "runtime" - -func enableMutexProfile() { - runtime.SetMutexProfileFraction(1) -} - -func disableMutexProfile() { - runtime.SetMutexProfileFraction(0) -} diff --git a/vendor/github.com/pkg/profile/mutex17.go b/vendor/github.com/pkg/profile/mutex17.go deleted file mode 100644 index b004c21d5..000000000 --- a/vendor/github.com/pkg/profile/mutex17.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.8 - -package profile - -// mock mutex support for Go 1.7 and earlier. - -func enableMutexProfile() {} - -func disableMutexProfile() {} diff --git a/vendor/github.com/pkg/profile/profile.go b/vendor/github.com/pkg/profile/profile.go deleted file mode 100644 index c44913a4c..000000000 --- a/vendor/github.com/pkg/profile/profile.go +++ /dev/null @@ -1,244 +0,0 @@ -// Package profile provides a simple way to manage runtime/pprof -// profiling of your Go application. -package profile - -import ( - "io/ioutil" - "log" - "os" - "os/signal" - "path/filepath" - "runtime" - "runtime/pprof" - "sync/atomic" -) - -const ( - cpuMode = iota - memMode - mutexMode - blockMode - traceMode -) - -// Profile represents an active profiling session. -type Profile struct { - // quiet suppresses informational messages during profiling. - quiet bool - - // noShutdownHook controls whether the profiling package should - // hook SIGINT to write profiles cleanly. - noShutdownHook bool - - // mode holds the type of profiling that will be made - mode int - - // path holds the base path where various profiling files are written. - // If blank, the base path will be generated by ioutil.TempDir. - path string - - // memProfileRate holds the rate for the memory profile. - memProfileRate int - - // closer holds a cleanup function that run after each profile - closer func() - - // stopped records if a call to profile.Stop has been made - stopped uint32 -} - -// NoShutdownHook controls whether the profiling package should -// hook SIGINT to write profiles cleanly. -// Programs with more sophisticated signal handling should set -// this to true and ensure the Stop() function returned from Start() -// is called during shutdown. -func NoShutdownHook(p *Profile) { p.noShutdownHook = true } - -// Quiet suppresses informational messages during profiling. -func Quiet(p *Profile) { p.quiet = true } - -// CPUProfile enables cpu profiling. -// It disables any previous profiling settings. -func CPUProfile(p *Profile) { p.mode = cpuMode } - -// DefaultMemProfileRate is the default memory profiling rate. 
-// See also http://golang.org/pkg/runtime/#pkg-variables -const DefaultMemProfileRate = 4096 - -// MemProfile enables memory profiling. -// It disables any previous profiling settings. -func MemProfile(p *Profile) { - p.memProfileRate = DefaultMemProfileRate - p.mode = memMode -} - -// MemProfileRate enables memory profiling at the preferred rate. -// It disables any previous profiling settings. -func MemProfileRate(rate int) func(*Profile) { - return func(p *Profile) { - p.memProfileRate = rate - p.mode = memMode - } -} - -// MutexProfile enables mutex profiling. -// It disables any previous profiling settings. -// -// Mutex profiling is a no-op before go1.8. -func MutexProfile(p *Profile) { p.mode = mutexMode } - -// BlockProfile enables block (contention) profiling. -// It disables any previous profiling settings. -func BlockProfile(p *Profile) { p.mode = blockMode } - -// Trace profile controls if execution tracing will be enabled. It disables any previous profiling settings. -func TraceProfile(p *Profile) { p.mode = traceMode } - -// ProfilePath controls the base path where various profiling -// files are written. If blank, the base path will be generated -// by ioutil.TempDir. -func ProfilePath(path string) func(*Profile) { - return func(p *Profile) { - p.path = path - } -} - -// Stop stops the profile and flushes any unwritten data. -func (p *Profile) Stop() { - if !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) { - // someone has already called close - return - } - p.closer() - atomic.StoreUint32(&started, 0) -} - -// started is non zero if a profile is running. -var started uint32 - -// Start starts a new profiling session. -// The caller should call the Stop method on the value returned -// to cleanly stop profiling. -func Start(options ...func(*Profile)) interface { - Stop() -} { - if !atomic.CompareAndSwapUint32(&started, 0, 1) { - log.Fatal("profile: Start() already called") - } - - var prof Profile - for _, option := range options { - option(&prof) - } - - path, err := func() (string, error) { - if p := prof.path; p != "" { - return p, os.MkdirAll(p, 0777) - } - return ioutil.TempDir("", "profile") - }() - - if err != nil { - log.Fatalf("profile: could not create initial output directory: %v", err) - } - - logf := func(format string, args ...interface{}) { - if !prof.quiet { - log.Printf(format, args...) 
- } - } - - switch prof.mode { - case cpuMode: - fn := filepath.Join(path, "cpu.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create cpu profile %q: %v", fn, err) - } - logf("profile: cpu profiling enabled, %s", fn) - pprof.StartCPUProfile(f) - prof.closer = func() { - pprof.StopCPUProfile() - f.Close() - logf("profile: cpu profiling disabled, %s", fn) - } - - case memMode: - fn := filepath.Join(path, "mem.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create memory profile %q: %v", fn, err) - } - old := runtime.MemProfileRate - runtime.MemProfileRate = prof.memProfileRate - logf("profile: memory profiling enabled (rate %d), %s", runtime.MemProfileRate, fn) - prof.closer = func() { - pprof.Lookup("heap").WriteTo(f, 0) - f.Close() - runtime.MemProfileRate = old - logf("profile: memory profiling disabled, %s", fn) - } - - case mutexMode: - fn := filepath.Join(path, "mutex.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create mutex profile %q: %v", fn, err) - } - enableMutexProfile() - logf("profile: mutex profiling enabled, %s", fn) - prof.closer = func() { - if mp := pprof.Lookup("mutex"); mp != nil { - mp.WriteTo(f, 0) - } - f.Close() - disableMutexProfile() - logf("profile: mutex profiling disabled, %s", fn) - } - - case blockMode: - fn := filepath.Join(path, "block.pprof") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create block profile %q: %v", fn, err) - } - runtime.SetBlockProfileRate(1) - logf("profile: block profiling enabled, %s", fn) - prof.closer = func() { - pprof.Lookup("block").WriteTo(f, 0) - f.Close() - runtime.SetBlockProfileRate(0) - logf("profile: block profiling disabled, %s", fn) - } - - case traceMode: - fn := filepath.Join(path, "trace.out") - f, err := os.Create(fn) - if err != nil { - log.Fatalf("profile: could not create trace output file %q: %v", fn, err) - } - if err := startTrace(f); err != nil { - log.Fatalf("profile: could not start trace: %v", err) - } - logf("profile: trace enabled, %s", fn) - prof.closer = func() { - stopTrace() - logf("profile: trace disabled, %s", fn) - } - } - - if !prof.noShutdownHook { - go func() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - <-c - - log.Println("profile: caught interrupt, stopping profiles") - prof.Stop() - - os.Exit(0) - }() - } - - return &prof -} diff --git a/vendor/github.com/pkg/profile/trace.go b/vendor/github.com/pkg/profile/trace.go deleted file mode 100644 index b349ed8b2..000000000 --- a/vendor/github.com/pkg/profile/trace.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build go1.7 - -package profile - -import "runtime/trace" - -var startTrace = trace.Start -var stopTrace = trace.Stop diff --git a/vendor/github.com/pkg/profile/trace16.go b/vendor/github.com/pkg/profile/trace16.go deleted file mode 100644 index 6aa6566ef..000000000 --- a/vendor/github.com/pkg/profile/trace16.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package profile - -import "io" - -// mock trace support for Go 1.6 and earlier. 
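-// The no-op stubs below keep TraceProfile compiling on Go 1.6 and earlier,
-// where the runtime/trace-based implementation in trace.go is not built.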
- -func startTrace(w io.Writer) error { return nil } -func stopTrace() {} diff --git a/vendor/github.com/pubnub/go-metrics-statsd/LICENSE b/vendor/github.com/pubnub/go-metrics-statsd/LICENSE deleted file mode 100644 index 78635365d..000000000 --- a/vendor/github.com/pubnub/go-metrics-statsd/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 PubNub - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/pubnub/go-metrics-statsd/README.md b/vendor/github.com/pubnub/go-metrics-statsd/README.md deleted file mode 100644 index a6cc5f866..000000000 --- a/vendor/github.com/pubnub/go-metrics-statsd/README.md +++ /dev/null @@ -1,18 +0,0 @@ -## Intro -This library is based on the cyberdelia [graphite library](https://github.com/cyberdelia/go-metrics-graphite) just modifying the wireformat for statsd - -This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics) -library which will post the metrics to Statsd. It was originally part of the -`go-metrics` library itself, but has been split off to make maintenance of -both the core library and the client easier. - -### Usage - -```go -import "github.com/pubnub/go-metrics-statsd" - - -go statsd.StatsD(metrics.DefaultRegistry, - 1*time.Second, "some.prefix", addr) -``` - diff --git a/vendor/github.com/pubnub/go-metrics-statsd/statsd.go b/vendor/github.com/pubnub/go-metrics-statsd/statsd.go deleted file mode 100644 index 27b8ea7e6..000000000 --- a/vendor/github.com/pubnub/go-metrics-statsd/statsd.go +++ /dev/null @@ -1,117 +0,0 @@ -package statsd - -import ( - "bufio" - "fmt" - "log" - "net" - "reflect" - "strconv" - "strings" - "time" - - "github.com/rcrowley/go-metrics" -) - -// StatsDConfig provides a container with -// configuration parameters for the StatsD exporter -type StatsDConfig struct { - Addr *net.UDPAddr // Network address to connect to - Registry metrics.Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// StatsD is a blocking exporter function which reports metrics in r -// to a statsd server located at addr, flushing them every d duration -// and prepending metric names with prefix. 
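-// A typical call runs the exporter in its own goroutine; the address and
-// prefix here are assumptions for illustration:
-//
-//	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:8125")
-//	go StatsD(metrics.DefaultRegistry, 10*time.Second, "myapp", addr)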
-func StatsD(r metrics.Registry, d time.Duration, prefix string, addr *net.UDPAddr) { - StatsDWithConfig(StatsDConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// StatsDWithConfig is a blocking exporter function just like StatsD, -// but it takes a StatsDConfig instead. -func StatsDWithConfig(c StatsDConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := statsd(&c); nil != err { - log.Println(err) - } - } -} - -func statsd(c *StatsDConfig) error { - du := float64(c.DurationUnit) - - conn, err := net.DialUDP("udp", nil, c.Addr) - - if nil != err { - return err - } - - // this will be executed when statsd func returns - defer conn.Close() - - // construct a buffer to write statsd wire format - w := bufio.NewWriter(conn) - - // for each metric in the registry, format it into statsd wire format and send - c.Registry.Each(func(name string, metric interface{}) { - switch m := metric.(type) { - case metrics.Counter: - fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, m.Count()) - case metrics.Gauge: - fmt.Fprintf(w, "%s--%s.value:%d|g\n", c.Prefix, name, m.Value()) - case metrics.GaugeFloat64: - fmt.Fprintf(w, "%s--%s.value:%f|g\n", c.Prefix, name, m.Value()) - case metrics.Histogram: - h := m.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, h.Count()) - fmt.Fprintf(w, "%s--%s.min:%d|g\n", c.Prefix, name, h.Min()) - fmt.Fprintf(w, "%s--%s.max:%d|g\n", c.Prefix, name, h.Max()) - fmt.Fprintf(w, "%s--%s.mean:%.2f|g\n", c.Prefix, name, h.Mean()) - fmt.Fprintf(w, "%s--%s.std-dev:%.2f|g\n", c.Prefix, name, h.StdDev()) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s--%s.%s-percentile:%.2f|g\n", c.Prefix, name, key, ps[psIdx]) - } - case metrics.Meter: - ss := m.Snapshot() - fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, ss.Count()) - fmt.Fprintf(w, "%s--%s.one-minute:%.2f|g\n", c.Prefix, name, ss.Rate1()) - fmt.Fprintf(w, "%s--%s.five-minute:%.2f|g\n", c.Prefix, name, ss.Rate5()) - fmt.Fprintf(w, "%s--%s.fifteen-minute:%.2f|g\n", c.Prefix, name, ss.Rate15()) - fmt.Fprintf(w, "%s--%s.mean:%.2f|g\n", c.Prefix, name, ss.RateMean()) - case metrics.Timer: - t := m.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, t.Count()) - fmt.Fprintf(w, "%s--%s.min:%d|g\n", c.Prefix, name, t.Min()/int64(du)) - fmt.Fprintf(w, "%s--%s.max:%d|g\n", c.Prefix, name, t.Max()/int64(du)) - fmt.Fprintf(w, "%s--%s.mean:%.2f|g\n", c.Prefix, name, t.Mean()/du) - fmt.Fprintf(w, "%s--%s.std-dev:%.2f|g\n", c.Prefix, name, t.StdDev()/du) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s--%s.%s-percentile:%.2f|g\n", c.Prefix, name, key, ps[psIdx]/du) - } - fmt.Fprintf(w, "%s--%s.one-minute:%.2f|g\n", c.Prefix, name, t.Rate1()) - fmt.Fprintf(w, "%s--%s.five-minute:%.2f|g\n", c.Prefix, name, t.Rate5()) - fmt.Fprintf(w, "%s--%s.fifteen-minute:%.2f|g\n", c.Prefix, name, t.Rate15()) - fmt.Fprintf(w, "%s--%s.mean-rate:%.2f|g\n", c.Prefix, name, t.RateMean()) - default: - log.Println("[WARN] No Metric", c.Prefix, name, reflect.TypeOf(m)) - } - w.Flush() - }) - - return nil -} diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE deleted file mode
100644 index 363fa9ee7..000000000 --- a/vendor/github.com/rcrowley/go-metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md deleted file mode 100644 index b7356b5fc..000000000 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ /dev/null @@ -1,168 +0,0 @@ -go-metrics -========== - -![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) - -Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>. - -Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>. - -Usage ----- - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -r := NewRegistry() -g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Register() is not threadsafe.
For threadsafe metric registration use -GetOrRegister: - -```go -t := metrics.GetOrRegisterTimer("account.create.latency", nil) -t.Time(func() {}) -t.Update(47) -``` - -**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will -leak memory: - -```go -// Will call Stop() on the Meter to allow for garbage collection -metrics.Unregister("quux") -// Or similarly for a Timer that embeds a Meter -metrics.Unregister("bang") -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): - -```go - -import "github.com/cyberdelia/go-metrics-graphite" - -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -**NOTE:** this has been pulled out of the library due to constant fluctuations -in the InfluxDB API. In fact, all client libraries are on their way out. see -issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and -[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. - -```go -import "github.com/vrischmann/go-metrics-influxdb" - -go influxdb.InfluxDB(metrics.DefaultRegistry, - 10e9, - "127.0.0.1:8086", - "database-name", - "username", - "password" -) -``` - -Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): - -**Note**: the client included with this repository under the `librato` package -has been deprecated and moved to the repository linked above. - -```go -import "github.com/mihasya/go-metrics-librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // percentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Maintain all metrics along with expvars at `/debug/metrics`: - -This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) -but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars -as well as all your go-metrics. 
- - -```go -import "github.com/rcrowley/go-metrics/exp" - -exp.Exp(metrics.DefaultRegistry) -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` - -Publishing Metrics ------------------- - -Clients are available for the following destinations: - -* Librato - https://github.com/mihasya/go-metrics-librato -* Graphite - https://github.com/cyberdelia/go-metrics-graphite -* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb -* Ganglia - https://github.com/appscode/metlia -* Prometheus - https://github.com/deathowl/go-metrics-prometheus -* DataDog - https://github.com/syntaqx/go-metrics-datadog -* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx -* Honeycomb - https://github.com/getspine/go-metrics-honeycomb -* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go deleted file mode 100644 index bb7b039cb..000000000 --- a/vendor/github.com/rcrowley/go-metrics/counter.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -import "sync/atomic" - -// Counters hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Count() int64 - Dec(int64) - Inc(int64) - Snapshot() Counter -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. -func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if UseNilMetrics { - return NilCounter{} - } - return &StandardCounter{0} -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} - -// Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } - -// NilCounter is a no-op Counter. -type NilCounter struct{} - -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. -func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count int64 -} - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) -} - -// Dec decrements the counter by the given amount. 
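-// It is implemented as an atomic add of -i, so concurrent increments and
-// decrements never lose updates.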
-func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) -} - -// Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) -} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go deleted file mode 100644 index 043ccefab..000000000 --- a/vendor/github.com/rcrowley/go-metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. -// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. -func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) - debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. 
-func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go deleted file mode 100644 index a8183dd7e..000000000 --- a/vendor/github.com/rcrowley/go-metrics/ewma.go +++ /dev/null @@ -1,138 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" -) - -// EWMAs continuously calculate an exponentially-weighted moving average -// based on an outside source of clock ticks. -type EWMA interface { - Rate() float64 - Snapshot() EWMA - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - if UseNilMetrics { - return NilEWMA{} - } - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. -func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. -func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. -func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment - alpha float64 - rate uint64 - init uint32 - mutex sync.Mutex -} - -// Rate returns the moving average rate of events per second. -func (a *StandardEWMA) Rate() float64 { - currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) - return currentRate -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - // Optimization to avoid mutex locking in the hot-path. - if atomic.LoadUint32(&a.init) == 1 { - a.updateRate(a.fetchInstantRate()) - } else { - // Slow-path: this is only needed on the first Tick() and preserves transactional updating - // of init and rate in the else block. The first conditional is needed below because - // a different thread could have set a.init = 1 between the time of the first atomic load and when - // the lock was acquired. 
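- // This is the classic double-checked locking pattern; init is re-tested
- // under the mutex before the one-time initialization runs.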
- a.mutex.Lock() - if atomic.LoadUint32(&a.init) == 1 { - // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section - // but again, this section is only invoked on the first successful Tick() operation. - a.updateRate(a.fetchInstantRate()) - } else { - atomic.StoreUint32(&a.init, 1) - atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) - } - a.mutex.Unlock() - } -} - -func (a *StandardEWMA) fetchInstantRate() float64 { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) - return instantRate -} - -func (a *StandardEWMA) updateRate(instantRate float64) { - currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) - currentRate += a.alpha * (instantRate - currentRate) - atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) -} - -// Update adds n uncounted events. -func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) -} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go deleted file mode 100644 index cb57a9388..000000000 --- a/vendor/github.com/rcrowley/go-metrics/gauge.go +++ /dev/null @@ -1,120 +0,0 @@ -package metrics - -import "sync/atomic" - -// Gauges hold an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() Gauge - Update(int64) - Value() int64 -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. -func NewGauge() Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &StandardGauge{0} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGauge(f func() int64) Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &FunctionalGauge{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge. -func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { - c := NewFunctionalGauge(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. -func (GaugeSnapshot) Update(int64) { - panic("Update called on a GaugeSnapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -// Snapshot is a no-op. -func (NilGauge) Snapshot() Gauge { return NilGauge{} } - -// Update is a no-op. -func (NilGauge) Update(v int64) {} - -// Value is a no-op. -func (NilGauge) Value() int64 { return 0 } - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() Gauge { - return GaugeSnapshot(g.Value()) -} - -// Update updates the gauge's value.
-func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) -} - -// Value returns the gauge's current value. -func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) -} - -// FunctionalGauge returns the value from a given function -type FunctionalGauge struct { - value func() int64 -} - -// Value returns the gauge's current value. -func (g FunctionalGauge) Value() int64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } - -// Update panics. -func (FunctionalGauge) Update(int64) { - panic("Update called on a FunctionalGauge") -} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go deleted file mode 100644 index 3962e6db0..000000000 --- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go +++ /dev/null @@ -1,125 +0,0 @@ -package metrics - -import ( - "math" - "sync/atomic" -) - -// GaugeFloat64s hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64 - Update(float64) - Value() float64 -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{ - value: 0.0, - } -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64. -func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &FunctionalGaugeFloat64{value: f} -} - -// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64. -func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { - c := NewFunctionalGaugeFloat64(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGaugeFloat64 is a no-op GaugeFloat64. -type NilGaugeFloat64 struct{} - -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses the -// sync/atomic package to manage a single float64 value, stored as its bit pattern. -type StandardGaugeFloat64 struct { - value uint64 -} - -// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - atomic.StoreUint64(&g.value, math.Float64bits(v)) -} - -// Value returns the gauge's current value. -func (g *StandardGaugeFloat64) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.value)) -} - -// FunctionalGaugeFloat64 returns value from given function -type FunctionalGaugeFloat64 struct { - value func() float64 -} - -// Value returns the gauge's current value. -func (g FunctionalGaugeFloat64) Value() float64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } - -// Update panics. -func (FunctionalGaugeFloat64) Update(float64) { - panic("Update called on a FunctionalGaugeFloat64") -} diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go deleted file mode 100644 index abd0a7d29..000000000 --- a/vendor/github.com/rcrowley/go-metrics/graphite.go +++ /dev/null @@ -1,113 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - for _ = range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -// GraphiteOnce performs a single submission to Graphite, returning a -// non-nil error on failed connections. This can be used in a loop -// similar to GraphiteWithConfig for custom error handling. -func GraphiteOnce(c GraphiteConfig) error { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - return graphite(&c) -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go deleted file mode 100644 index 445131cae..000000000 --- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthchecks hold an error value describing an arbitrary up/down status. -type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. 
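-// For example (a sketch; db is an assumed handle with a Ping method):
-//
-//	hc := NewHealthcheck(func(h Healthcheck) {
-//		if err := db.Ping(); err != nil {
-//			h.Unhealthy(err)
-//		} else {
-//			h.Healthy()
-//		}
-//	})
-//	hc.Check()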
-func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if UseNilMetrics { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. -func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go deleted file mode 100644 index dbc837fe4..000000000 --- a/vendor/github.com/rcrowley/go-metrics/histogram.go +++ /dev/null @@ -1,202 +0,0 @@ -package metrics - -// Histograms calculate distribution statistics from a series of int64 values. -type Histogram interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Sum() int64 - Update(int64) - Variance() float64 -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if UseNilMetrics { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. 
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample at the time the snapshot was taken. -func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. -func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilHistogram) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. -type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. 
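-// For example, Percentiles([]float64{0.5, 0.99}) yields the median and the
-// 99th percentile of the sampled values.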
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - -// Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} -} - -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample. -func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. -func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go deleted file mode 100644 index 174b9477e..000000000 --- a/vendor/github.com/rcrowley/go-metrics/json.go +++ /dev/null @@ -1,31 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. -func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(r.GetAll()) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for _ = range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. -func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} - -func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(p.GetAll()) -} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go deleted file mode 100644 index f8074c045..000000000 --- a/vendor/github.com/rcrowley/go-metrics/log.go +++ /dev/null @@ -1,80 +0,0 @@ -package metrics - -import ( - "time" -) - -type Logger interface { - Printf(format string, v ...interface{}) -} - -func Log(r Registry, freq time.Duration, l Logger) { - LogScaled(r, freq, time.Nanosecond, l) -} - -// Output each metric in the given registry periodically using the given -// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
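-// A typical call (sketch):
-//
-//	go LogScaled(DefaultRegistry, 30*time.Second, time.Millisecond,
-//		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))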
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { - du := float64(scale) - duSuffix := scale.String()[1:] - - for _ = range time.Tick(freq) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) - l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) - l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) - l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) - l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) - l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) - l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) - l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) - l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md deleted file mode 100644 index 47454f54b..000000000 --- a/vendor/github.com/rcrowley/go-metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) 
- -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
---------------------------------------------------------------------------------------
-
-```
-VmPeak:	  153296 kB
-VmSize:	  153296 kB
-VmLck:	       0 kB
-VmHWM:	  101176 kB
-VmRSS:	  101176 kB
-VmData:	  146124 kB
-VmStk:	     136 kB
-VmExe:	    1048 kB
-VmLib:	    1848 kB
-VmPTE:	     240 kB
-VmSwap:	       0 kB
-```
-
-**11.0692 kB virtual, 10.0056 kB resident per histogram.**
-
-50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
----------------------------------------------------------------------------------------
-
-```
-VmPeak:	  557264 kB
-VmSize:	  557264 kB
-VmLck:	       0 kB
-VmHWM:	  501056 kB
-VmRSS:	  501056 kB
-VmData:	  550092 kB
-VmStk:	     136 kB
-VmExe:	    1048 kB
-VmLib:	    1848 kB
-VmPTE:	    1032 kB
-VmSwap:	       0 kB
-```
-
-**10.2932 kB virtual, 9.99872 kB resident per histogram.**
-
-1000 meters
------------
-
-```
-VmPeak:	   74504 kB
-VmSize:	   74504 kB
-VmLck:	       0 kB
-VmHWM:	   24124 kB
-VmRSS:	   24124 kB
-VmData:	   67340 kB
-VmStk:	     136 kB
-VmExe:	    1040 kB
-VmLib:	    1848 kB
-VmPTE:	      92 kB
-VmSwap:	       0 kB
-```
-
-**31.9 kB virtual, 23.004 kB resident per meter.**
-
-10000 meters
-------------
-
-```
-VmPeak:	  278920 kB
-VmSize:	  278920 kB
-VmLck:	       0 kB
-VmHWM:	  227300 kB
-VmRSS:	  227300 kB
-VmData:	  271756 kB
-VmStk:	     136 kB
-VmExe:	    1040 kB
-VmLib:	    1848 kB
-VmPTE:	     488 kB
-VmSwap:	       0 kB
```
-
-**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
deleted file mode 100644
index 7807406a3..000000000
--- a/vendor/github.com/rcrowley/go-metrics/meter.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package metrics
-
-import (
-	"math"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minutes and a mean rate.
-type Meter interface {
-	Count() int64
-	Mark(int64)
-	Rate1() float64
-	Rate5() float64
-	Rate15() float64
-	RateMean() float64
-	Snapshot() Meter
-	Stop()
-}
-
-// GetOrRegisterMeter returns an existing Meter or constructs and registers a
-// new StandardMeter.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterMeter(name string, r Registry) Meter {
-	if nil == r {
-		r = DefaultRegistry
-	}
-	return r.GetOrRegister(name, NewMeter).(Meter)
-}
-
-// NewMeter constructs a new StandardMeter and launches a goroutine.
-// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
-func NewMeter() Meter {
-	if UseNilMetrics {
-		return NilMeter{}
-	}
-	m := newStandardMeter()
-	arbiter.Lock()
-	defer arbiter.Unlock()
-	arbiter.meters[m] = struct{}{}
-	if !arbiter.started {
-		arbiter.started = true
-		go arbiter.tick()
-	}
-	return m
-}
-
-// NewRegisteredMeter constructs and registers a new StandardMeter and launches a
-// goroutine.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredMeter(name string, r Registry) Meter {
-	c := NewMeter()
-	if nil == r {
-		r = DefaultRegistry
-	}
-	r.Register(name, c)
-	return c
-}
-
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
-	count                          int64
-	rate1, rate5, rate15, rateMean uint64
-}
-
-// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
-	panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// Stop is a no-op.
-func (m *MeterSnapshot) Stop() {}
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{}
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// Stop is a no-op.
-func (NilMeter) Stop() {}
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
-	// Only used on stop.
-	lock        sync.Mutex
-	snapshot    *MeterSnapshot
-	a1, a5, a15 EWMA
-	startTime   time.Time
-	stopped     uint32
-}
-
-func newStandardMeter() *StandardMeter {
-	return &StandardMeter{
-		snapshot:  &MeterSnapshot{},
-		a1:        NewEWMA1(),
-		a5:        NewEWMA5(),
-		a15:       NewEWMA15(),
-		startTime: time.Now(),
-	}
-}
-
-// Stop stops the meter. Mark() will be a no-op if used after the meter has been stopped.
-func (m *StandardMeter) Stop() {
-	m.lock.Lock()
-	stopped := m.stopped
-	m.stopped = 1
-	m.lock.Unlock()
-	if stopped != 1 {
-		arbiter.Lock()
-		delete(arbiter.meters, m)
-		arbiter.Unlock()
-	}
-}
-
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
-	return atomic.LoadInt64(&m.snapshot.count)
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
-	if atomic.LoadUint32(&m.stopped) == 1 {
-		return
-	}
-
-	atomic.AddInt64(&m.snapshot.count, n)
-
-	m.a1.Update(n)
-	m.a5.Update(n)
-	m.a15.Update(n)
-	m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
-	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
-	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
-	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
-}
-
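Taken together, the interface, constructors, and snapshot above are the whole Meter surface: Mark() feeds the three EWMAs, the shared arbiter ticks them every five seconds, and the Rate accessors read back the decayed per-second rates. A minimal usage sketch against the vendored API being removed here (the metric name "requests" and the loop are illustrative):

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// NewRegisteredMeter starts the shared arbiter goroutine on first use;
	// Stop releases the meter from the arbiter's set.
	m := metrics.NewRegisteredMeter("requests", metrics.DefaultRegistry)
	defer m.Stop()

	for i := 0; i < 100; i++ {
		m.Mark(1) // record one event
		time.Sleep(10 * time.Millisecond)
	}
	// Rates are EWMA-decayed events per second; RateMean is count/uptime.
	fmt.Printf("count=%d 1-min=%.2f mean=%.2f\n", m.Count(), m.Rate1(), m.RateMean())
}
```

Stop() matters: a meter that is never stopped stays referenced by the arbiter and cannot be garbage collected.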
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
-	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
-	copiedSnapshot := MeterSnapshot{
-		count:    atomic.LoadInt64(&m.snapshot.count),
-		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
-		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
-		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
-		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
-	}
-	return &copiedSnapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
-	rate1 := math.Float64bits(m.a1.Rate())
-	rate5 := math.Float64bits(m.a5.Rate())
-	rate15 := math.Float64bits(m.a15.Rate())
-	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
-
-	atomic.StoreUint64(&m.snapshot.rate1, rate1)
-	atomic.StoreUint64(&m.snapshot.rate5, rate5)
-	atomic.StoreUint64(&m.snapshot.rate15, rate15)
-	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
-}
-
-func (m *StandardMeter) tick() {
-	m.a1.Tick()
-	m.a5.Tick()
-	m.a15.Tick()
-	m.updateSnapshot()
-}
-
-// meterArbiter ticks meters every 5s from a single goroutine.
-// meters are referenced in a set for future stopping.
-type meterArbiter struct {
-	sync.RWMutex
-	started bool
-	meters  map[*StandardMeter]struct{}
-	ticker  *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
-
-// Ticks meters on the scheduled interval.
-func (ma *meterArbiter) tick() {
-	for {
-		select {
-		case <-ma.ticker.C:
-			ma.tickMeters()
-		}
-	}
-}
-
-func (ma *meterArbiter) tickMeters() {
-	ma.RLock()
-	defer ma.RUnlock()
-	for meter := range ma.meters {
-		meter.tick()
-	}
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
deleted file mode 100644
index b97a49ed1..000000000
--- a/vendor/github.com/rcrowley/go-metrics/metrics.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-//
-//
-// Coda Hale's original work:
-package metrics
-
-// UseNilMetrics is checked by the constructor functions for all of the
-// standard metrics. If it is true, the metric returned is a stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
deleted file mode 100644
index 266b6c93d..000000000
--- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package metrics
-
-import (
-	"bufio"
-	"fmt"
-	"log"
-	"net"
-	"os"
-	"strings"
-	"time"
-)
-
-var shortHostName string = ""
-
-// OpenTSDBConfig provides a container with configuration parameters for
-// the OpenTSDB exporter.
-type OpenTSDBConfig struct {
-	Addr          *net.TCPAddr  // Network address to connect to
-	Registry      Registry      // Registry to be exported
-	FlushInterval time.Duration // Flush interval
-	DurationUnit  time.Duration // Time conversion unit for durations
-	Prefix        string        // Prefix to be prepended to metric names
-}
-
-// OpenTSDB is a blocking exporter function which reports metrics in r
-// to a TSDB server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
-	OpenTSDBWithConfig(OpenTSDBConfig{
-		Addr:          addr,
-		Registry:      r,
-		FlushInterval: d,
-		DurationUnit:  time.Nanosecond,
-		Prefix:        prefix,
-	})
-}
-
-// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
-// but it takes an OpenTSDBConfig instead.
-func OpenTSDBWithConfig(c OpenTSDBConfig) {
-	for _ = range time.Tick(c.FlushInterval) {
-		if err := openTSDB(&c); nil != err {
-			log.Println(err)
-		}
-	}
-}
-
-func getShortHostname() string {
-	if shortHostName == "" {
-		host, _ := os.Hostname()
-		if index := strings.Index(host, "."); index > 0 {
-			shortHostName = host[:index]
-		} else {
-			shortHostName = host
-		}
-	}
-	return shortHostName
-}
-
-func openTSDB(c *OpenTSDBConfig) error {
-	shortHostname := getShortHostname()
-	now := time.Now().Unix()
-	du := float64(c.DurationUnit)
-	conn, err := net.DialTCP("tcp", nil, c.Addr)
-	if nil != err {
-		return err
-	}
-	defer conn.Close()
-	w := bufio.NewWriter(conn)
-	c.Registry.Each(func(name string, i interface{}) {
-		switch metric := i.(type) {
-		case Counter:
-			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
-		case Gauge:
-			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
-		case GaugeFloat64:
-			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
-		case Histogram:
-			h := metric.Snapshot()
-			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
-			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
-			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
-			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
-			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
-		case Meter:
-			m := metric.Snapshot()
-			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
-		case Timer:
-			t := metric.Snapshot()
-			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
-			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
-			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
-		}
-		w.Flush()
-	})
-	return nil
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
deleted file mode 100644
index b3bab64e1..000000000
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ /dev/null
@@ -1,363 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-)
-
-// DuplicateMetric is the error returned by Registry.Register when a metric
-// already exists. If you mean to Register that metric you must first
-// Unregister the existing metric.
-type DuplicateMetric string
-
-func (err DuplicateMetric) Error() string {
-	return fmt.Sprintf("duplicate metric: %s", string(err))
-}
-
-// A Registry holds references to a set of metrics by name and can iterate
-// over them, calling callback functions provided by the user.
-//
-// This is an interface so as to encourage other structs to implement
-// the Registry API as appropriate.
-type Registry interface {
-
-	// Call the given function for each registered metric.
-	Each(func(string, interface{}))
-
-	// Get the metric by the given name or nil if none is registered.
-	Get(string) interface{}
-
-	// GetAll metrics in the Registry.
-	GetAll() map[string]map[string]interface{}
-
-	// Gets an existing metric or registers the given one.
-	// The interface can be the metric to register if not found in registry,
-	// or a function returning the metric for lazy instantiation.
-	GetOrRegister(string, interface{}) interface{}
-
-	// Register the given metric under the given name.
-	Register(string, interface{}) error
-
-	// Run all registered healthchecks.
-	RunHealthchecks()
-
-	// Unregister the metric with the given name.
-	Unregister(string)
-
-	// Unregister all metrics. (Mostly for testing.)
-	UnregisterAll()
-}
-
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
-type StandardRegistry struct {
-	metrics map[string]interface{}
-	mutex   sync.RWMutex
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
-	return &StandardRegistry{metrics: make(map[string]interface{})}
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) Each(f func(string, interface{})) {
-	for name, i := range r.registered() {
-		f(name, i)
-	}
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.RLock() - defer r.mutex.RUnlock() - return r.metrics[name] -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - // access the read lock first which should be re-entrant - r.mutex.RLock() - metric, ok := r.metrics[name] - r.mutex.RUnlock() - if ok { - return metric - } - - // only take the write lock if we'll be modifying the metrics map - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - r.register(name, i) - return i -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) -} - -// Run all registered healthchecks. -func (r *StandardRegistry) RunHealthchecks() { - r.mutex.RLock() - defer r.mutex.RUnlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { - h.Check() - } - } -} - -// GetAll metrics in the Registry -func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return data -} - -// Unregister the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() - r.stop(name) - delete(r.metrics, name) -} - -// Unregister all metrics. (Mostly for testing.) 
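The double-checked locking in GetOrRegister above is what makes lazy instantiation cheap: the read-locked fast path handles the common hit, and the constructor function is only invoked, under the write lock, on a genuine miss. A small sketch, assuming the package's NewCounter constructor and Counter interface (defined elsewhere in the same vendored package):

```go
package main

import metrics "github.com/rcrowley/go-metrics"

func main() {
	// Pass the constructor itself, not NewCounter(): the reflect check in
	// GetOrRegister sees a func and only calls it when "requests.hits"
	// is not already registered, so nothing is allocated on the hit path.
	c := metrics.GetOrRegister("requests.hits", metrics.NewCounter).(metrics.Counter)
	c.Inc(1)
}
```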
-func (r *StandardRegistry) UnregisterAll() { - r.mutex.Lock() - defer r.mutex.Unlock() - for name, _ := range r.metrics { - r.stop(name) - delete(r.metrics, name) - } -} - -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } - switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: - r.metrics[name] = i - } - return nil -} - -func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) - for name, i := range r.metrics { - metrics[name] = i - } - return metrics -} - -func (r *StandardRegistry) stop(name string) { - if i, ok := r.metrics[name]; ok { - if s, ok := i.(Stoppable); ok { - s.Stop() - } - } -} - -// Stoppable defines the metrics which has to be stopped. -type Stoppable interface { - Stop() -} - -type PrefixedRegistry struct { - underlying Registry - prefix string -} - -func NewPrefixedRegistry(prefix string) Registry { - return &PrefixedRegistry{ - underlying: NewRegistry(), - prefix: prefix, - } -} - -func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { - return &PrefixedRegistry{ - underlying: parent, - prefix: prefix, - } -} - -// Call the given function for each registered metric. -func (r *PrefixedRegistry) Each(fn func(string, interface{})) { - wrappedFn := func(prefix string) func(string, interface{}) { - return func(name string, iface interface{}) { - if strings.HasPrefix(name, prefix) { - fn(name, iface) - } else { - return - } - } - } - - baseRegistry, prefix := findPrefix(r, "") - baseRegistry.Each(wrappedFn(prefix)) -} - -func findPrefix(registry Registry, prefix string) (Registry, string) { - switch r := registry.(type) { - case *PrefixedRegistry: - return findPrefix(r.underlying, r.prefix+prefix) - case *StandardRegistry: - return r, prefix - } - return nil, "" -} - -// Get the metric by the given name or nil if none is registered. -func (r *PrefixedRegistry) Get(name string) interface{} { - realName := r.prefix + name - return r.underlying.Get(realName) -} - -// Gets an existing metric or registers the given one. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { - realName := r.prefix + name - return r.underlying.GetOrRegister(realName, metric) -} - -// Register the given metric under the given name. The name will be prefixed. -func (r *PrefixedRegistry) Register(name string, metric interface{}) error { - realName := r.prefix + name - return r.underlying.Register(realName, metric) -} - -// Run all registered healthchecks. -func (r *PrefixedRegistry) RunHealthchecks() { - r.underlying.RunHealthchecks() -} - -// GetAll metrics in the Registry -func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { - return r.underlying.GetAll() -} - -// Unregister the metric with the given name. The name will be prefixed. -func (r *PrefixedRegistry) Unregister(name string) { - realName := r.prefix + name - r.underlying.Unregister(realName) -} - -// Unregister all metrics. (Mostly for testing.) -func (r *PrefixedRegistry) UnregisterAll() { - r.underlying.UnregisterAll() -} - -var DefaultRegistry Registry = NewRegistry() - -// Call the given function for each registered metric. 
-func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Register the given metric under the given name. Panics if a metric by the -// given name is already registered. -func MustRegister(name string, i interface{}) { - if err := Register(name, i); err != nil { - panic(err) - } -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. -func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go deleted file mode 100644 index 11c6b785a..000000000 --- a/vendor/github.com/rcrowley/go-metrics/runtime.go +++ /dev/null @@ -1,212 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/pprof" - "time" -) - -var ( - memStats runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - GCCPUFraction GaugeFloat64 - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - NumThread Gauge - ReadMemStats Timer - } - frees uint64 - lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 - - threadCreateProfile = pprof.Lookup("threadcreate") -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. -// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. 
- runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) - - // - i := numGC % uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) - - runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) -} - -// Register runtimeMetrics for the Go runtime statistics exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
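RegisterRuntimeMemStats (below) allocates and registers the gauges that CaptureRuntimeMemStats then feeds, so the two are used as a pair, with the capture loop on its own goroutine because of the stop-the-world cost called out above. A sketch of that pairing (the 5s interval and the sleep are illustrative):

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r) // wire up the gauges first
	// Blocking loop; each ReadMemStats briefly stops the world, so keep
	// the interval coarse and run the capture on its own goroutine.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)
	time.Sleep(12 * time.Second) // let a couple of captures happen
}
```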
-func RegisterRuntimeMemStats(r Registry) {
-	runtimeMetrics.MemStats.Alloc = NewGauge()
-	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
-	runtimeMetrics.MemStats.DebugGC = NewGauge()
-	runtimeMetrics.MemStats.EnableGC = NewGauge()
-	runtimeMetrics.MemStats.Frees = NewGauge()
-	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
-	runtimeMetrics.MemStats.HeapIdle = NewGauge()
-	runtimeMetrics.MemStats.HeapInuse = NewGauge()
-	runtimeMetrics.MemStats.HeapObjects = NewGauge()
-	runtimeMetrics.MemStats.HeapReleased = NewGauge()
-	runtimeMetrics.MemStats.HeapSys = NewGauge()
-	runtimeMetrics.MemStats.LastGC = NewGauge()
-	runtimeMetrics.MemStats.Lookups = NewGauge()
-	runtimeMetrics.MemStats.Mallocs = NewGauge()
-	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
-	runtimeMetrics.MemStats.MCacheSys = NewGauge()
-	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
-	runtimeMetrics.MemStats.MSpanSys = NewGauge()
-	runtimeMetrics.MemStats.NextGC = NewGauge()
-	runtimeMetrics.MemStats.NumGC = NewGauge()
-	runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
-	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
-	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
-	runtimeMetrics.MemStats.StackInuse = NewGauge()
-	runtimeMetrics.MemStats.StackSys = NewGauge()
-	runtimeMetrics.MemStats.Sys = NewGauge()
-	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
-	runtimeMetrics.NumCgoCall = NewGauge()
-	runtimeMetrics.NumGoroutine = NewGauge()
-	runtimeMetrics.NumThread = NewGauge()
-	runtimeMetrics.ReadMemStats = NewTimer()
-
-	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
-	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
-	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
-	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
-	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
-	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
-	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
-	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
-	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
-	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
-	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
-	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
-	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
-	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
-	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
-	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
-	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
-	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
-	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
-	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
-	r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
-	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
-	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
-	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
-	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
-	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
-	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
-	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
-	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
-	r.Register("runtime.NumThread", runtimeMetrics.NumThread)
-	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
deleted file mode 100644
index e3391f4e8..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build cgo
-// +build !appengine
-
-package metrics
-
-import "runtime"
-
-func numCgoCall() int64 {
-	return runtime.NumCgoCall()
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
deleted file mode 100644
index ca12c05ba..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
-	return memStats.GCCPUFraction
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
deleted file mode 100644
index 616a3b475..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !cgo appengine
-
-package metrics
-
-func numCgoCall() int64 {
-	return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
deleted file mode 100644
index be96aa6f1..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
-	return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
deleted file mode 100644
index fecee5ef6..000000000
--- a/vendor/github.com/rcrowley/go-metrics/sample.go
+++ /dev/null
@@ -1,616 +0,0 @@
-package metrics
-
-import (
-	"math"
-	"math/rand"
-	"sort"
-	"sync"
-	"time"
-)
-
-const rescaleThreshold = time.Hour
-
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
-	Clear()
-	Count() int64
-	Max() int64
-	Mean() float64
-	Min() int64
-	Percentile(float64) float64
-	Percentiles([]float64) []float64
-	Size() int
-	Snapshot() Sample
-	StdDev() float64
-	Sum() int64
-	Update(int64)
-	Values() []int64
-	Variance() float64
-}
-
-// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
-// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
-// Decay Model for Streaming Systems".
-//
-//
-type ExpDecaySample struct {
-	alpha         float64
-	count         int64
-	mutex         sync.Mutex
-	reservoirSize int
-	t0, t1        time.Time
-	values        *expDecaySampleHeap
-}
-
-// NewExpDecaySample constructs a new exponentially-decaying sample with the
-// given reservoir size and alpha.
-func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
-	if UseNilMetrics {
-		return NilSample{}
-	}
-	s := &ExpDecaySample{
-		alpha:         alpha,
-		reservoirSize: reservoirSize,
-		t0:            time.Now(),
-		values:        newExpDecaySampleHeap(reservoirSize),
-	}
-	s.t1 = s.t0.Add(rescaleThreshold)
-	return s
-}
-
-// Clear clears all samples.
-func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values.Clear() -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. -func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.values.Size() -} - -// Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. -func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if s.values.Size() == s.reservoirSize { - s.values.Pop() - } - s.values.Push(expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), - v: v, - }) - if t.After(s.t1) { - values := s.values.Values() - t0 := s.t0 - s.values.Clear() - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) - s.values.Push(v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. 
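The priority key in ExpDecaySample.update above is the forward-decay trick: k = exp(alpha·(t−t0))/u with u drawn uniformly from [0,1), so later arrivals draw exponentially larger keys on average and displace older entries at the bottom of the min-heap, while the hourly rescale keeps the exponent from overflowing. A toy illustration of the key (names and the one-hour comparison are illustrative):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

func main() {
	const alpha = 0.015 // per second, as in NewExpDecaySample(1028, 0.015)
	t0 := time.Now()
	key := func(t time.Time) float64 {
		// Forward-decay priority: exp(alpha * age) scaled by 1/u.
		return math.Exp(t.Sub(t0).Seconds()*alpha) / rand.Float64()
	}
	// A sample arriving an hour later draws keys about e^(0.015*3600) = e^54
	// times larger on average, so it almost surely outranks the older one.
	fmt.Println(key(t0.Add(time.Second)), key(t0.Add(time.Hour)))
}
```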
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
-	return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Snapshot is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var max int64 = math.MinInt64
-	for _, v := range values {
-		if max < v {
-			max = v
-		}
-	}
-	return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
-	if 0 == len(values) {
-		return 0.0
-	}
-	return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var min int64 = math.MaxInt64
-	for _, v := range values {
-		if min > v {
-			min = v
-		}
-	}
-	return min
-}
-
-// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
-	return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
-	scores := make([]float64, len(ps))
-	size := len(values)
-	if size > 0 {
-		sort.Sort(values)
-		for i, p := range ps {
-			pos := p * float64(size+1)
-			if pos < 1.0 {
-				scores[i] = float64(values[0])
-			} else if pos >= float64(size) {
-				scores[i] = float64(values[size-1])
-			} else {
-				lower := float64(values[int(pos)-1])
-				upper := float64(values[int(pos)])
-				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
-			}
-		}
-	}
-	return scores
-}
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
-	count  int64
-	values []int64
-}
-
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
-	return &SampleSnapshot{
-		count:  count,
-		values: values,
-	}
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
-	panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
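SamplePercentiles above uses the pos = p·(n+1) rule with linear interpolation between neighbouring order statistics, clamping below position 1 to the minimum and at or beyond position n to the maximum. A worked fragment (only meaningful inside package metrics, since int64Slice is an unexported type there):

```go
// n = 4 sorted values; for p = 0.5, pos = 0.5*(4+1) = 2.5, so the result
// interpolates halfway between vals[1] = 20 and vals[2] = 30.
vals := int64Slice{10, 20, 30, 40}
fmt.Println(SamplePercentile(vals, 0.5)) // 20 + 0.5*(30-20) = 25
```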
-func (s *SampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *SampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. -func (s *SampleSnapshot) Snapshot() Sample { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } - -// Update panics. -func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} - -// Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. -func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v - } - return sum -} - -// SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - m := SampleMean(values) - var sum float64 - for _, v := range values { - d := float64(v) - m - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if UseNilMetrics { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. 
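UniformSample is Vitter's Algorithm R; the key step lives in the Update method further below: once the reservoir is full, the i-th arrival overwrites a uniformly chosen slot with probability reservoirSize/i, which leaves every stream element in the reservoir with equal probability. A self-contained sketch of that core step (function and variable names are illustrative):

```go
package main

import (
	"fmt"
	"math/rand"
)

// reservoirR keeps a uniform sample of size k from a stream (Algorithm R).
func reservoirR(stream []int64, k int) []int64 {
	res := make([]int64, 0, k)
	var count int64
	for _, v := range stream {
		count++
		if len(res) < k {
			res = append(res, v) // fill phase
			continue
		}
		// Replace a random slot with probability k/count.
		if r := rand.Int63n(count); r < int64(k) {
			res[r] = v
		}
	}
	return res
}

func main() {
	stream := make([]int64, 1000)
	for i := range stream {
		stream[i] = int64(i)
	}
	fmt.Println(reservoirR(stream, 5))
}
```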
-func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. -func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - r := rand.Int63n(s.count) - if r < int64(len(s.values)) { - s.values[int(r)] = v - } - } -} - -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. -func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { - return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. 
-// The internal implementation is copied from the standard library's container/heap -type expDecaySampleHeap struct { - s []expDecaySample -} - -func (h *expDecaySampleHeap) Clear() { - h.s = h.s[:0] -} - -func (h *expDecaySampleHeap) Push(s expDecaySample) { - n := len(h.s) - h.s = h.s[0 : n+1] - h.s[n] = s - h.up(n) -} - -func (h *expDecaySampleHeap) Pop() expDecaySample { - n := len(h.s) - 1 - h.s[0], h.s[n] = h.s[n], h.s[0] - h.down(0, n) - - n = len(h.s) - s := h.s[n-1] - h.s = h.s[0 : n-1] - return s -} - -func (h *expDecaySampleHeap) Size() int { - return len(h.s) -} - -func (h *expDecaySampleHeap) Values() []expDecaySample { - return h.s -} - -func (h *expDecaySampleHeap) up(j int) { - for { - i := (j - 1) / 2 // parent - if i == j || !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - j = i - } -} - -func (h *expDecaySampleHeap) down(i, n int) { - for { - j1 := 2*i + 1 - if j1 >= n || j1 < 0 { // j1 < 0 after int overflow - break - } - j := j1 // left child - if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { - j = j2 // = 2*i + 2 // right child - } - if !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - i = j - } -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go deleted file mode 100644 index 693f19085..000000000 --- a/vendor/github.com/rcrowley/go-metrics/syslog.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. 
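Hooking the Syslog exporter below up to the system log takes one stdlib call; a hedged sketch (the "fabio" tag and 60s interval are illustrative):

```go
package main

import (
	"log"
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "fabio")
	if err != nil {
		log.Fatal(err)
	}
	// Syslog blocks on its ticker loop, so give it a goroutine.
	go metrics.Syslog(metrics.DefaultRegistry, 60*time.Second, w)
	select {} // keep the process alive for the sketch
}
```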
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
-	for _ = range time.Tick(d) {
-		r.Each(func(name string, i interface{}) {
-			switch metric := i.(type) {
-			case Counter:
-				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
-			case Gauge:
-				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
-			case GaugeFloat64:
-				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
-			case Healthcheck:
-				metric.Check()
-				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
-			case Histogram:
-				h := metric.Snapshot()
-				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-				w.Info(fmt.Sprintf(
-					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
-					name,
-					h.Count(),
-					h.Min(),
-					h.Max(),
-					h.Mean(),
-					h.StdDev(),
-					ps[0],
-					ps[1],
-					ps[2],
-					ps[3],
-					ps[4],
-				))
-			case Meter:
-				m := metric.Snapshot()
-				w.Info(fmt.Sprintf(
-					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
-					name,
-					m.Count(),
-					m.Rate1(),
-					m.Rate5(),
-					m.Rate15(),
-					m.RateMean(),
-				))
-			case Timer:
-				t := metric.Snapshot()
-				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-				w.Info(fmt.Sprintf(
-					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
-					name,
-					t.Count(),
-					t.Min(),
-					t.Max(),
-					t.Mean(),
-					t.StdDev(),
-					ps[0],
-					ps[1],
-					ps[2],
-					ps[3],
-					ps[4],
-					t.Rate1(),
-					t.Rate5(),
-					t.Rate15(),
-					t.RateMean(),
-				))
-			}
-		})
-	}
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
deleted file mode 100644
index d6ec4c626..000000000
--- a/vendor/github.com/rcrowley/go-metrics/timer.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package metrics
-
-import (
-	"sync"
-	"time"
-)
-
-// Timers capture the duration and rate of events.
-type Timer interface {
-	Count() int64
-	Max() int64
-	Mean() float64
-	Min() int64
-	Percentile(float64) float64
-	Percentiles([]float64) []float64
-	Rate1() float64
-	Rate5() float64
-	Rate15() float64
-	RateMean() float64
-	Snapshot() Timer
-	StdDev() float64
-	Stop()
-	Sum() int64
-	Time(func())
-	Update(time.Duration)
-	UpdateSince(time.Time)
-	Variance() float64
-}
-
-// GetOrRegisterTimer returns an existing Timer or constructs and registers a
-// new StandardTimer.
-// Be sure to unregister the timer from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterTimer(name string, r Registry) Timer {
-	if nil == r {
-		r = DefaultRegistry
-	}
-	return r.GetOrRegister(name, NewTimer).(Timer)
-}
-
-// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
-// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
-func NewCustomTimer(h Histogram, m Meter) Timer {
-	if UseNilMetrics {
-		return NilTimer{}
-	}
-	return &StandardTimer{
-		histogram: h,
-		meter:     m,
-	}
-}
-
-// NewRegisteredTimer constructs and registers a new StandardTimer.
-// Be sure to unregister the timer from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredTimer(name string, r Registry) Timer {
-	c := NewTimer()
-	if nil == r {
-		r = DefaultRegistry
-	}
-	r.Register(name, c)
-	return c
-}
-
-// NewTimer constructs a new StandardTimer using an exponentially-decaying
-// sample with the same reservoir size and alpha as UNIX load averages.
-// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewTimer() Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. -type NilTimer struct { - h Histogram - m Meter -} - -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Stop is a no-op. -func (NilTimer) Stop() {} - -// Sum is a no-op. -func (NilTimer) Sum() int64 { return 0 } - -// Time is a no-op. -func (NilTimer) Time(func()) {} - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. -func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - -// Snapshot returns a read-only copy of the timer. 
-func (t *StandardTimer) Snapshot() Timer { - t.mutex.Lock() - defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (t *StandardTimer) StdDev() float64 { - return t.histogram.StdDev() -} - -// Stop stops the meter. -func (t *StandardTimer) Stop() { - t.meter.Stop() -} - -// Sum returns the sum in the sample. -func (t *StandardTimer) Sum() int64 { - return t.histogram.Sum() -} - -// Record the duration of the execution of the given function. -func (t *StandardTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. -func (t *StandardTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(d)) - t.meter.Mark(1) -} - -// Record the duration of an event that started at a time and ends now. -func (t *StandardTimer) UpdateSince(ts time.Time) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(time.Since(ts))) - t.meter.Mark(1) -} - -// Variance returns the variance of the values in the sample. -func (t *StandardTimer) Variance() float64 { - return t.histogram.Variance() -} - -// TimerSnapshot is a read-only copy of another Timer. -type TimerSnapshot struct { - histogram *HistogramSnapshot - meter *MeterSnapshot -} - -// Count returns the number of events recorded at the time the snapshot was -// taken. -func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } - -// Max returns the maximum value at the time the snapshot was taken. -func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } - -// Mean returns the mean value at the time the snapshot was taken. -func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } - -// Min returns the minimum value at the time the snapshot was taken. -func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } - -// Percentile returns an arbitrary percentile of sampled values at the time the -// snapshot was taken. -func (t *TimerSnapshot) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of sampled values at -// the time the snapshot was taken. -func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// Snapshot returns the snapshot. -func (t *TimerSnapshot) Snapshot() Timer { return t } - -// StdDev returns the standard deviation of the values at the time the snapshot -// was taken. -func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Stop is a no-op. 
-func (t *TimerSnapshot) Stop() {}
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
deleted file mode 100755
index c4ae91e64..000000000
--- a/vendor/github.com/rcrowley/go-metrics/validate.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# check there are no formatting issues
-GOFMT_LINES=`gofmt -l . | wc -l | xargs`
-test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
-
-# run the tests for the root package
-go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
deleted file mode 100644
index 091e971d2..000000000
--- a/vendor/github.com/rcrowley/go-metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "io"
- "sort"
- "time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to the
-// given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
- for _ = range time.Tick(d) {
- WriteOnce(r, w)
- }
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
-func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - - sort.Sort(namedMetrics) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. -type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name -} diff --git a/vendor/github.com/rogpeppe/fastuuid/LICENSE b/vendor/github.com/rogpeppe/fastuuid/LICENSE deleted file mode 100644 index 9525fc825..000000000 --- a/vendor/github.com/rogpeppe/fastuuid/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright © 2014, Roger Peppe -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of this project nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/fastuuid/README.md b/vendor/github.com/rogpeppe/fastuuid/README.md deleted file mode 100644 index 6e8f134ae..000000000 --- a/vendor/github.com/rogpeppe/fastuuid/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# fastuuid --- - import "github.com/rogpeppe/fastuuid" - -Package fastuuid provides fast UUID generation of 192 bit universally unique -identifiers. It does not provide formatting or parsing of the identifiers (it is -assumed that a simple hexadecimal or base64 representation is sufficient, for -which adequate functionality exists elsewhere). - -Note that the generated UUIDs are not unguessable - each UUID generated from a -Generator is adjacent to the previously generated UUID. - -It ignores RFC 4122. - -## Usage - -#### type Generator - -```go -type Generator struct { -} -``` - -Generator represents a UUID generator that generates UUIDs in sequence from a -random starting point. - -#### func MustNewGenerator - -```go -func MustNewGenerator() *Generator -``` -MustNewGenerator is like NewGenerator but panics on failure. - -#### func NewGenerator - -```go -func NewGenerator() (*Generator, error) -``` -NewGenerator returns a new Generator. It can fail if the crypto/rand read fails. - -#### func (*Generator) Next - -```go -func (g *Generator) Next() [24]byte -``` -Next returns the next UUID from the generator. Only the first 8 bytes can differ -from the previous UUID, so taking a slice of the first 16 bytes is sufficient to -provide a somewhat less secure 128 bit UUID. - -It is OK to call this method concurrently. diff --git a/vendor/github.com/rogpeppe/fastuuid/uuid.go b/vendor/github.com/rogpeppe/fastuuid/uuid.go deleted file mode 100644 index c7cf6b2e4..000000000 --- a/vendor/github.com/rogpeppe/fastuuid/uuid.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package fastuuid provides fast UUID generation of 192 bit -// universally unique identifiers. It does not provide -// formatting or parsing of the identifiers (it is assumed -// that a simple hexadecimal or base64 representation -// is sufficient, for which adequate functionality exists elsewhere). -// -// Note that the generated UUIDs are not unguessable - each -// UUID generated from a Generator is adjacent to the -// previously generated UUID. -// -// It ignores RFC 4122. 
-package fastuuid - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "sync/atomic" -) - -// Generator represents a UUID generator that -// generates UUIDs in sequence from a random starting -// point. -type Generator struct { - seed [24]byte - counter uint64 -} - -// NewGenerator returns a new Generator. -// It can fail if the crypto/rand read fails. -func NewGenerator() (*Generator, error) { - var g Generator - _, err := rand.Read(g.seed[:]) - if err != nil { - return nil, errors.New("cannot generate random seed: " + err.Error()) - } - return &g, nil -} - -// MustNewGenerator is like NewGenerator -// but panics on failure. -func MustNewGenerator() *Generator { - g, err := NewGenerator() - if err != nil { - panic(err) - } - return g -} - -// Next returns the next UUID from the generator. -// Only the first 8 bytes can differ from the previous -// UUID, so taking a slice of the first 16 bytes -// is sufficient to provide a somewhat less secure 128 bit UUID. -// -// It is OK to call this method concurrently. -func (g *Generator) Next() [24]byte { - x := atomic.AddUint64(&g.counter, 1) - var counterBytes [8]byte - binary.LittleEndian.PutUint64(counterBytes[:], x) - - uuid := g.seed - for i, b := range counterBytes { - uuid[i] ^= b - } - return uuid -} diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE deleted file mode 100644 index 937942c2b..000000000 --- a/vendor/github.com/sergi/go-diff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go deleted file mode 100644 index 59b885168..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ /dev/null @@ -1,1339 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "bytes" - "errors" - "fmt" - "html" - "math" - "net/url" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Operation defines the operation of a diff item. -type Operation int8 - -const ( - // DiffDelete item represents a delete diff. 
- DiffDelete Operation = -1 - // DiffInsert item represents an insert diff. - DiffInsert Operation = 1 - // DiffEqual item represents an equal diff. - DiffEqual Operation = 0 -) - -// Diff represents one diff operation -type Diff struct { - Type Operation - Text string -} - -func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { - return append(slice[:index], append(elements, slice[index+amount:]...)...) -} - -// DiffMain finds the differences between two texts. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { - return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) -} - -// DiffMainRunes finds the differences between two rune sequences. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { - var deadline time.Time - if dmp.DiffTimeout > 0 { - deadline = time.Now().Add(dmp.DiffTimeout) - } - return dmp.diffMainRunes(text1, text2, checklines, deadline) -} - -func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - if runesEqual(text1, text2) { - var diffs []Diff - if len(text1) > 0 { - diffs = append(diffs, Diff{DiffEqual, string(text1)}) - } - return diffs - } - // Trim off common prefix (speedup). - commonlength := commonPrefixLength(text1, text2) - commonprefix := text1[:commonlength] - text1 = text1[commonlength:] - text2 = text2[commonlength:] - - // Trim off common suffix (speedup). - commonlength = commonSuffixLength(text1, text2) - commonsuffix := text1[len(text1)-commonlength:] - text1 = text1[:len(text1)-commonlength] - text2 = text2[:len(text2)-commonlength] - - // Compute the diff on the middle block. - diffs := dmp.diffCompute(text1, text2, checklines, deadline) - - // Restore the prefix and suffix. - if len(commonprefix) != 0 { - diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) - } - if len(commonsuffix) != 0 { - diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) - } - - return dmp.DiffCleanupMerge(diffs) -} - -// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. -func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - diffs := []Diff{} - if len(text1) == 0 { - // Just add some text (speedup). - return append(diffs, Diff{DiffInsert, string(text2)}) - } else if len(text2) == 0 { - // Just delete some text (speedup). - return append(diffs, Diff{DiffDelete, string(text1)}) - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if i := runesIndex(longtext, shorttext); i != -1 { - op := DiffInsert - // Swap insertions for deletions if diff is reversed. - if len(text1) > len(text2) { - op = DiffDelete - } - // Shorter text is inside the longer text (speedup). - return []Diff{ - Diff{op, string(longtext[:i])}, - Diff{DiffEqual, string(shorttext)}, - Diff{op, string(longtext[i+len(shorttext):])}, - } - } else if len(shorttext) == 1 { - // Single character string. - // After the previous speedup, the character can't be an equality. 
- return []Diff{ - Diff{DiffDelete, string(text1)}, - Diff{DiffInsert, string(text2)}, - } - // Check to see if the problem can be split in two. - } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { - // A half-match was found, sort out the return data. - text1A := hm[0] - text1B := hm[1] - text2A := hm[2] - text2B := hm[3] - midCommon := hm[4] - // Send both pairs off for separate processing. - diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) - diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) - // Merge the results. - return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) - } else if checklines && len(text1) > 100 && len(text2) > 100 { - return dmp.diffLineMode(text1, text2, deadline) - } - return dmp.diffBisect(text1, text2, deadline) -} - -// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { - // Scan the text on a line-by-line basis first. - text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) - - diffs := dmp.diffMainRunes(text1, text2, false, deadline) - - // Convert the diff back to original text. - diffs = dmp.DiffCharsToLines(diffs, linearray) - // Eliminate freak matches (e.g. blank lines) - diffs = dmp.DiffCleanupSemantic(diffs) - - // Rediff any replacement blocks, this time character-by-character. - // Add a dummy entry at the end. - diffs = append(diffs, Diff{DiffEqual, ""}) - - pointer := 0 - countDelete := 0 - countInsert := 0 - - // NOTE: Rune slices are slower than using strings in this case. - textDelete := "" - textInsert := "" - - for pointer < len(diffs) { - switch diffs[pointer].Type { - case DiffInsert: - countInsert++ - textInsert += diffs[pointer].Text - case DiffDelete: - countDelete++ - textDelete += diffs[pointer].Text - case DiffEqual: - // Upon reaching an equality, check for prior redundancies. - if countDelete >= 1 && countInsert >= 1 { - // Delete the offending records and add the merged ones. - diffs = splice(diffs, pointer-countDelete-countInsert, - countDelete+countInsert) - - pointer = pointer - countDelete - countInsert - a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) - for j := len(a) - 1; j >= 0; j-- { - diffs = splice(diffs, pointer, 0, a[j]) - } - pointer = pointer + len(a) - } - - countInsert = 0 - countDelete = 0 - textDelete = "" - textInsert = "" - } - pointer++ - } - - return diffs[:len(diffs)-1] // Remove the dummy entry at the end. -} - -// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { - // Unused in this code, but retained for interface compatibility. - return dmp.diffBisect([]rune(text1), []rune(text2), deadline) -} - -// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. -// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { - // Cache the text lengths to prevent multiple calls. 
- runes1Len, runes2Len := len(runes1), len(runes2) - - maxD := (runes1Len + runes2Len + 1) / 2 - vOffset := maxD - vLength := 2 * maxD - - v1 := make([]int, vLength) - v2 := make([]int, vLength) - for i := range v1 { - v1[i] = -1 - v2[i] = -1 - } - v1[vOffset+1] = 0 - v2[vOffset+1] = 0 - - delta := runes1Len - runes2Len - // If the total number of characters is odd, then the front path will collide with the reverse path. - front := (delta%2 != 0) - // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. - k1start := 0 - k1end := 0 - k2start := 0 - k2end := 0 - for d := 0; d < maxD; d++ { - // Bail out if deadline is reached. - if !deadline.IsZero() && time.Now().After(deadline) { - break - } - - // Walk the front path one step. - for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { - k1Offset := vOffset + k1 - var x1 int - - if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { - x1 = v1[k1Offset+1] - } else { - x1 = v1[k1Offset-1] + 1 - } - - y1 := x1 - k1 - for x1 < runes1Len && y1 < runes2Len { - if runes1[x1] != runes2[y1] { - break - } - x1++ - y1++ - } - v1[k1Offset] = x1 - if x1 > runes1Len { - // Ran off the right of the graph. - k1end += 2 - } else if y1 > runes2Len { - // Ran off the bottom of the graph. - k1start += 2 - } else if front { - k2Offset := vOffset + delta - k1 - if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { - // Mirror x2 onto top-left coordinate system. - x2 := runes1Len - v2[k2Offset] - if x1 >= x2 { - // Overlap detected. - return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - // Walk the reverse path one step. - for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { - k2Offset := vOffset + k2 - var x2 int - if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { - x2 = v2[k2Offset+1] - } else { - x2 = v2[k2Offset-1] + 1 - } - var y2 = x2 - k2 - for x2 < runes1Len && y2 < runes2Len { - if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { - break - } - x2++ - y2++ - } - v2[k2Offset] = x2 - if x2 > runes1Len { - // Ran off the left of the graph. - k2end += 2 - } else if y2 > runes2Len { - // Ran off the top of the graph. - k2start += 2 - } else if !front { - k1Offset := vOffset + delta - k2 - if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { - x1 := v1[k1Offset] - y1 := vOffset + x1 - k1Offset - // Mirror x2 onto top-left coordinate system. - x2 = runes1Len - x2 - if x1 >= x2 { - // Overlap detected. - return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - } - // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. - return []Diff{ - Diff{DiffDelete, string(runes1)}, - Diff{DiffInsert, string(runes2)}, - } -} - -func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, - deadline time.Time) []Diff { - runes1a := runes1[:x] - runes2a := runes2[:y] - runes1b := runes1[x:] - runes2b := runes2[y:] - - // Compute both diffs serially. - diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) - diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) - - return append(diffs, diffsb...) -} - -// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. -// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. 
-func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { - chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) - return string(chars1), string(chars2), lineArray -} - -// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. -func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. - lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' - lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 - - chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) - chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) - - return chars1, chars2, lineArray -} - -func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { - return dmp.DiffLinesToRunes(string(text1), string(text2)) -} - -// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. -// We use strings instead of []runes as input mainly because you can't use []rune as a map key. -func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. - lineStart := 0 - lineEnd := -1 - runes := []rune{} - - for lineEnd < len(text)-1 { - lineEnd = indexOf(text, "\n", lineStart) - - if lineEnd == -1 { - lineEnd = len(text) - 1 - } - - line := text[lineStart : lineEnd+1] - lineStart = lineEnd + 1 - lineValue, ok := lineHash[line] - - if ok { - runes = append(runes, rune(lineValue)) - } else { - *lineArray = append(*lineArray, line) - lineHash[line] = len(*lineArray) - 1 - runes = append(runes, rune(len(*lineArray)-1)) - } - } - - return runes -} - -// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. -func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { - hydrated := make([]Diff, 0, len(diffs)) - for _, aDiff := range diffs { - chars := aDiff.Text - text := make([]string, len(chars)) - - for i, r := range chars { - text[i] = lineArray[r] - } - - aDiff.Text = strings.Join(text, "") - hydrated = append(hydrated, aDiff) - } - return hydrated -} - -// DiffCommonPrefix determines the common prefix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. - return commonPrefixLength([]rune(text1), []rune(text2)) -} - -// DiffCommonSuffix determines the common suffix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. - return commonSuffixLength([]rune(text1), []rune(text2)) -} - -// commonPrefixLength returns the length of the common prefix of two rune slices. -func commonPrefixLength(text1, text2 []rune) int { - short, long := text1, text2 - if len(short) > len(long) { - short, long = long, short - } - for i, r := range short { - if r != long[i] { - return i - } - } - return len(short) -} - -// commonSuffixLength returns the length of the common suffix of two rune slices. 
-func commonSuffixLength(text1, text2 []rune) int { - n := min(len(text1), len(text2)) - for i := 0; i < n; i++ { - if text1[len(text1)-i-1] != text2[len(text2)-i-1] { - return i - } - } - return n - - // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 - // Binary search. - // Performance analysis: http://neil.fraser.name/news/2007/10/09/ - /* - pointermin := 0 - pointermax := math.Min(len(text1), len(text2)) - pointermid := pointermax - pointerend := 0 - for pointermin < pointermid { - if text1[len(text1)-pointermid:len(text1)-pointerend] == - text2[len(text2)-pointermid:len(text2)-pointerend] { - pointermin = pointermid - pointerend = pointermin - } else { - pointermax = pointermid - } - pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) - } - return pointermid - */ -} - -// DiffCommonOverlap determines if the suffix of one string is the prefix of another. -func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { - // Cache the text lengths to prevent multiple calls. - text1Length := len(text1) - text2Length := len(text2) - // Eliminate the null case. - if text1Length == 0 || text2Length == 0 { - return 0 - } - // Truncate the longer string. - if text1Length > text2Length { - text1 = text1[text1Length-text2Length:] - } else if text1Length < text2Length { - text2 = text2[0:text1Length] - } - textLength := int(math.Min(float64(text1Length), float64(text2Length))) - // Quick check for the worst case. - if text1 == text2 { - return textLength - } - - // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ - best := 0 - length := 1 - for { - pattern := text1[textLength-length:] - found := strings.Index(text2, pattern) - if found == -1 { - break - } - length += found - if found == 0 || text1[textLength-length:] == text2[0:length] { - best = length - length++ - } - } - - return best -} - -// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { - // Unused in this code, but retained for interface compatibility. - runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) - if runeSlices == nil { - return nil - } - - result := make([]string, len(runeSlices)) - for i, r := range runeSlices { - result[i] = string(r) - } - return result -} - -func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { - if dmp.DiffTimeout <= 0 { - // Don't risk returning a non-optimal diff if we have unlimited time. - return nil - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { - return nil // Pointless. - } - - // First check if the second quarter is the seed for a half-match. - hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) - - // Check again based on the third quarter. - hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) - - hm := [][]rune{} - if hm1 == nil && hm2 == nil { - return nil - } else if hm2 == nil { - hm = hm1 - } else if hm1 == nil { - hm = hm2 - } else { - // Both matched. Select the longest. 
- if len(hm1[4]) > len(hm2[4]) { - hm = hm1 - } else { - hm = hm2 - } - } - - // A half-match was found, sort out the return data. - if len(text1) > len(text2) { - return hm - } - - return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} -} - -// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? -// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. -func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { - var bestCommonA []rune - var bestCommonB []rune - var bestCommonLen int - var bestLongtextA []rune - var bestLongtextB []rune - var bestShorttextA []rune - var bestShorttextB []rune - - // Start with a 1/4 length substring at position i as a seed. - seed := l[i : i+len(l)/4] - - for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { - prefixLength := commonPrefixLength(l[i:], s[j:]) - suffixLength := commonSuffixLength(l[:i], s[:j]) - - if bestCommonLen < suffixLength+prefixLength { - bestCommonA = s[j-suffixLength : j] - bestCommonB = s[j : j+prefixLength] - bestCommonLen = len(bestCommonA) + len(bestCommonB) - bestLongtextA = l[:i-suffixLength] - bestLongtextB = l[i+prefixLength:] - bestShorttextA = s[:j-suffixLength] - bestShorttextB = s[j+prefixLength:] - } - } - - if bestCommonLen*2 < len(l) { - return nil - } - - return [][]rune{ - bestLongtextA, - bestLongtextB, - bestShorttextA, - bestShorttextB, - append(bestCommonA, bestCommonB...), - } -} - -// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. -func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { - changes := false - // Stack of indices where equalities are found. - type equality struct { - data int - next *equality - } - var equalities *equality - - var lastequality string - // Always equal to diffs[equalities[equalitiesLength - 1]][1] - var pointer int // Index of current position. - // Number of characters that changed prior to the equality. - var lengthInsertions1, lengthDeletions1 int - // Number of characters that changed after the equality. - var lengthInsertions2, lengthDeletions2 int - - for pointer < len(diffs) { - if diffs[pointer].Type == DiffEqual { - // Equality found. - - equalities = &equality{ - data: pointer, - next: equalities, - } - lengthInsertions1 = lengthInsertions2 - lengthDeletions1 = lengthDeletions2 - lengthInsertions2 = 0 - lengthDeletions2 = 0 - lastequality = diffs[pointer].Text - } else { - // An insertion or deletion. - - if diffs[pointer].Type == DiffInsert { - lengthInsertions2 += len(diffs[pointer].Text) - } else { - lengthDeletions2 += len(diffs[pointer].Text) - } - // Eliminate an equality that is smaller or equal to the edits on both sides of it. - difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) - difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) - if len(lastequality) > 0 && - (len(lastequality) <= difference1) && - (len(lastequality) <= difference2) { - // Duplicate record. - insPoint := equalities.data - diffs = append( - diffs[:insPoint], - append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) - - // Change second copy to insert. - diffs[insPoint+1].Type = DiffInsert - // Throw away the equality we just deleted. 
- equalities = equalities.next
-
- if equalities != nil {
- equalities = equalities.next
- }
- if equalities != nil {
- pointer = equalities.data
- } else {
- pointer = -1
- }
-
- lengthInsertions1 = 0 // Reset the counters.
- lengthDeletions1 = 0
- lengthInsertions2 = 0
- lengthDeletions2 = 0
- lastequality = ""
- changes = true
- }
- }
- pointer++
- }
-
- // Normalize the diff.
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
- diffs = dmp.DiffCleanupSemanticLossless(diffs)
- // Find any overlaps between deletions and insertions.
- // e.g: <del>abcxxx</del><ins>xxxdef</ins>
- // -> <del>abc</del>xxx<ins>def</ins>
- // e.g: <del>xxxabc</del><ins>defxxx</ins>
- // -> <ins>def</ins>xxx<del>abc</del>
- // Only extract an overlap if it is as big as the edit ahead or behind it.
- pointer = 1
- for pointer < len(diffs) {
- if diffs[pointer-1].Type == DiffDelete &&
- diffs[pointer].Type == DiffInsert {
- deletion := diffs[pointer-1].Text
- insertion := diffs[pointer].Text
- overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion)
- overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion)
- if overlapLength1 >= overlapLength2 {
- if float64(overlapLength1) >= float64(len(deletion))/2 ||
- float64(overlapLength1) >= float64(len(insertion))/2 {
-
- // Overlap found. Insert an equality and trim the surrounding edits.
- diffs = append(
- diffs[:pointer],
- append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...)
-
- diffs[pointer-1].Text =
- deletion[0 : len(deletion)-overlapLength1]
- diffs[pointer+1].Text = insertion[overlapLength1:]
- pointer++
- }
- } else {
- if float64(overlapLength2) >= float64(len(deletion))/2 ||
- float64(overlapLength2) >= float64(len(insertion))/2 {
- // Reverse overlap found. Insert an equality and swap and trim the surrounding edits.
- overlap := Diff{DiffEqual, deletion[:overlapLength2]}
- diffs = append(
- diffs[:pointer],
- append([]Diff{overlap}, diffs[pointer:]...)...)
-
- diffs[pointer-1].Type = DiffInsert
- diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2]
- diffs[pointer+1].Type = DiffDelete
- diffs[pointer+1].Text = deletion[overlapLength2:]
- pointer++
- }
- }
- pointer++
- }
- pointer++
- }
-
- return diffs
-}
-
-// Define some regex patterns for matching boundaries.
-var (
- nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`)
- whitespaceRegex = regexp.MustCompile(`\s`)
- linebreakRegex = regexp.MustCompile(`[\r\n]`)
- blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`)
- blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`)
-)
-
-// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries.
-// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables.
-func diffCleanupSemanticScore(one, two string) int {
- if len(one) == 0 || len(two) == 0 {
- // Edges are the best.
- return 6
- }
-
- // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity.
- rune1, _ := utf8.DecodeLastRuneInString(one)
- rune2, _ := utf8.DecodeRuneInString(two)
- char1 := string(rune1)
- char2 := string(rune2)
-
- nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1)
- nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2)
- whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1)
- whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2)
- lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1)
- lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2)
- blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one)
- blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two)
-
- if blankLine1 || blankLine2 {
- // Five points for blank lines.
- return 5
- } else if lineBreak1 || lineBreak2 {
- // Four points for line breaks.
- return 4
- } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 {
- // Three points for end of sentences.
- return 3
- } else if whitespace1 || whitespace2 {
- // Two points for whitespace.
- return 2
- } else if nonAlphaNumeric1 || nonAlphaNumeric2 {
- // One point for non-alphanumeric.
- return 1
- }
- return 0
-}
-
-// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary.
-// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
-func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff {
- pointer := 1
-
- // Intentionally ignore the first and last element (don't need checking).
- for pointer < len(diffs)-1 {
- if diffs[pointer-1].Type == DiffEqual &&
- diffs[pointer+1].Type == DiffEqual {
-
- // This is a single edit surrounded by equalities.
- equality1 := diffs[pointer-1].Text
- edit := diffs[pointer].Text
- equality2 := diffs[pointer+1].Text
-
- // First, shift the edit as far left as possible.
- commonOffset := dmp.DiffCommonSuffix(equality1, edit)
- if commonOffset > 0 {
- commonString := edit[len(edit)-commonOffset:]
- equality1 = equality1[0 : len(equality1)-commonOffset]
- edit = commonString + edit[:len(edit)-commonOffset]
- equality2 = commonString + equality2
- }
-
- // Second, step character by character right, looking for the best fit.
- bestEquality1 := equality1
- bestEdit := edit
- bestEquality2 := equality2
- bestScore := diffCleanupSemanticScore(equality1, edit) +
- diffCleanupSemanticScore(edit, equality2)
-
- for len(edit) != 0 && len(equality2) != 0 {
- _, sz := utf8.DecodeRuneInString(edit)
- if len(equality2) < sz || edit[:sz] != equality2[:sz] {
- break
- }
- equality1 += edit[:sz]
- edit = edit[sz:] + equality2[:sz]
- equality2 = equality2[sz:]
- score := diffCleanupSemanticScore(equality1, edit) +
- diffCleanupSemanticScore(edit, equality2)
- // The >= encourages trailing rather than leading whitespace on edits.
- if score >= bestScore {
- bestScore = score
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
- }
- }
-
- if diffs[pointer-1].Text != bestEquality1 {
- // We have an improvement, save it back to the diff.
- if len(bestEquality1) != 0 {
- diffs[pointer-1].Text = bestEquality1
- } else {
- diffs = splice(diffs, pointer-1, 1)
- pointer--
- }
-
- diffs[pointer].Text = bestEdit
- if len(bestEquality2) != 0 {
- diffs[pointer+1].Text = bestEquality2
- } else {
- diffs = append(diffs[:pointer+1], diffs[pointer+2:]...)
- pointer--
- }
- }
- }
- pointer++
- }
-
- return diffs
-}
-
-// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
-func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
- changes := false
- // Stack of indices where equalities are found.
- type equality struct {
- data int
- next *equality
- }
- var equalities *equality
- // Always equal to equalities[equalitiesLength-1][1]
- lastequality := ""
- pointer := 0 // Index of current position.
- // Is there an insertion operation before the last equality.
- preIns := false
- // Is there a deletion operation before the last equality.
- preDel := false
- // Is there an insertion operation after the last equality.
- postIns := false
- // Is there a deletion operation after the last equality.
- postDel := false
- for pointer < len(diffs) {
- if diffs[pointer].Type == DiffEqual { // Equality found.
- if len(diffs[pointer].Text) < dmp.DiffEditCost &&
- (postIns || postDel) {
- // Candidate found.
- equalities = &equality{
- data: pointer,
- next: equalities,
- }
- preIns = postIns
- preDel = postDel
- lastequality = diffs[pointer].Text
- } else {
- // Not a candidate, and can never become one.
- equalities = nil
- lastequality = ""
- }
- postIns = false
- postDel = false
- } else { // An insertion or deletion.
- if diffs[pointer].Type == DiffDelete {
- postDel = true
- } else {
- postIns = true
- }
-
- // Five types to be split:
- // <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
- // <ins>A</ins>X<ins>C</ins><del>D</del>
- // <ins>A</ins><del>B</del>X<ins>C</ins>
- // <ins>A</ins>X<ins>C</ins><del>D</del>
- // <ins>A</ins><del>B</del>X<del>C</del>
- var sumPres int
- if preIns {
- sumPres++
- }
- if preDel {
- sumPres++
- }
- if postIns {
- sumPres++
- }
- if postDel {
- sumPres++
- }
- if len(lastequality) > 0 &&
- ((preIns && preDel && postIns && postDel) ||
- ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
-
- insPoint := equalities.data
-
- // Duplicate record.
- diffs = append(diffs[:insPoint],
- append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
-
- // Change second copy to insert.
- diffs[insPoint+1].Type = DiffInsert
- // Throw away the equality we just deleted.
- equalities = equalities.next
- lastequality = ""
-
- if preIns && preDel {
- // No changes made which could affect previous entry, keep going.
- postIns = true
- postDel = true
- equalities = nil
- } else {
- if equalities != nil {
- equalities = equalities.next
- }
- if equalities != nil {
- pointer = equalities.data
- } else {
- pointer = -1
- }
- postIns = false
- postDel = false
- }
- changes = true
- }
- }
- pointer++
- }
-
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
-
- return diffs
-}
-
-// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
-// Any edit section can move as long as it doesn't cross an equality.
-func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
- // Add a dummy entry at the end.
- diffs = append(diffs, Diff{DiffEqual, ""})
- pointer := 0
- countDelete := 0
- countInsert := 0
- commonlength := 0
- textDelete := []rune(nil)
- textInsert := []rune(nil)
-
- for pointer < len(diffs) {
- switch diffs[pointer].Type {
- case DiffInsert:
- countInsert++
- textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
- pointer++
- break
- case DiffDelete:
- countDelete++
- textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
- pointer++
- break
- case DiffEqual:
- // Upon reaching an equality, check for prior redundancies.
- if countDelete+countInsert > 1 {
- if countDelete != 0 && countInsert != 0 {
- // Factor out any common prefixes.
- commonlength = commonPrefixLength(textInsert, textDelete)
- if commonlength != 0 {
- x := pointer - countDelete - countInsert
- if x > 0 && diffs[x-1].Type == DiffEqual {
- diffs[x-1].Text += string(textInsert[:commonlength])
- } else {
- diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
- pointer++
- }
- textInsert = textInsert[commonlength:]
- textDelete = textDelete[commonlength:]
- }
- // Factor out any common suffixes.
- commonlength = commonSuffixLength(textInsert, textDelete)
- if commonlength != 0 {
- insertIndex := len(textInsert) - commonlength
- deleteIndex := len(textDelete) - commonlength
- diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
- textInsert = textInsert[:insertIndex]
- textDelete = textDelete[:deleteIndex]
- }
- }
- // Delete the offending records and add the merged ones.
- if countDelete == 0 {
- diffs = splice(diffs, pointer-countInsert,
- countDelete+countInsert,
- Diff{DiffInsert, string(textInsert)})
- } else if countInsert == 0 {
- diffs = splice(diffs, pointer-countDelete,
- countDelete+countInsert,
- Diff{DiffDelete, string(textDelete)})
- } else {
- diffs = splice(diffs, pointer-countDelete-countInsert,
- countDelete+countInsert,
- Diff{DiffDelete, string(textDelete)},
- Diff{DiffInsert, string(textInsert)})
- }
-
- pointer = pointer - countDelete - countInsert + 1
- if countDelete != 0 {
- pointer++
- }
- if countInsert != 0 {
- pointer++
- }
- } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
- // Merge this equality with the previous one.
- diffs[pointer-1].Text += diffs[pointer].Text
- diffs = append(diffs[:pointer], diffs[pointer+1:]...)
- } else {
- pointer++
- }
- countInsert = 0
- countDelete = 0
- textDelete = nil
- textInsert = nil
- break
- }
- }
-
- if len(diffs[len(diffs)-1].Text) == 0 {
- diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
- }
-
- // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
- changes := false
- pointer = 1
- // Intentionally ignore the first and last element (don't need checking).
- for pointer < (len(diffs) - 1) {
- if diffs[pointer-1].Type == DiffEqual &&
- diffs[pointer+1].Type == DiffEqual {
- // This is a single edit surrounded by equalities.
- if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
- // Shift the edit over the previous equality.
- diffs[pointer].Text = diffs[pointer-1].Text +
- diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
- diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
- diffs = splice(diffs, pointer-1, 1)
- changes = true
- } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
- // Shift the edit over the next equality.
- diffs[pointer-1].Text += diffs[pointer+1].Text
- diffs[pointer].Text =
- diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
- diffs = splice(diffs, pointer+1, 1)
- changes = true
- }
- }
- pointer++
- }
-
- // If shifts were made, the diff needs reordering and another shift sweep.
- if changes {
- diffs = dmp.DiffCleanupMerge(diffs)
- }
-
- return diffs
-}
-
-// DiffXIndex returns the equivalent location in s2.
-func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
- chars1 := 0
- chars2 := 0
- lastChars1 := 0
- lastChars2 := 0
- lastDiff := Diff{}
- for i := 0; i < len(diffs); i++ {
- aDiff := diffs[i]
- if aDiff.Type != DiffInsert {
- // Equality or deletion.
- chars1 += len(aDiff.Text)
- }
- if aDiff.Type != DiffDelete {
- // Equality or insertion.
- chars2 += len(aDiff.Text)
- }
- if chars1 > loc {
- // Overshot the location.
- lastDiff = aDiff
- break
- }
- lastChars1 = chars1
- lastChars2 = chars2
- }
- if lastDiff.Type == DiffDelete {
- // The location was deleted.
- return lastChars2
- }
- // Add the remaining character length.
- return lastChars2 + (loc - lastChars1)
-}
-
-// DiffPrettyHtml converts a []Diff into a pretty HTML report.
-// It is intended as an example from which to write one's own display functions.
-func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
- var buff bytes.Buffer
- for _, diff := range diffs {
- text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
- switch diff.Type {
- case DiffInsert:
- _, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("</ins>")
- case DiffDelete:
- _, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("</del>")
- case DiffEqual:
- _, _ = buff.WriteString("<span>")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("</span>")
- }
- }
- return buff.String()
-}
-
-// DiffPrettyText converts a []Diff into a colored text report.
-func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
- var buff bytes.Buffer
- for _, diff := range diffs {
- text := diff.Text
-
- switch diff.Type {
- case DiffInsert:
- _, _ = buff.WriteString("\x1b[32m")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("\x1b[0m")
- case DiffDelete:
- _, _ = buff.WriteString("\x1b[31m")
- _, _ = buff.WriteString(text)
- _, _ = buff.WriteString("\x1b[0m")
- case DiffEqual:
- _, _ = buff.WriteString(text)
- }
- }
-
- return buff.String()
-}
-
-// DiffText1 computes and returns the source text (all equalities and deletions).
-func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
- //StringBuilder text = new StringBuilder()
- var text bytes.Buffer
-
- for _, aDiff := range diffs {
- if aDiff.Type != DiffInsert {
- _, _ = text.WriteString(aDiff.Text)
- }
- }
- return text.String()
-}
-
-// DiffText2 computes and returns the destination text (all equalities and insertions).
-func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
- var text bytes.Buffer
-
- for _, aDiff := range diffs {
- if aDiff.Type != DiffDelete {
- _, _ = text.WriteString(aDiff.Text)
- }
- }
- return text.String()
-}
-
-// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
-func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
- levenshtein := 0
- insertions := 0
- deletions := 0
-
- for _, aDiff := range diffs {
- switch aDiff.Type {
- case DiffInsert:
- insertions += len(aDiff.Text)
- case DiffDelete:
- deletions += len(aDiff.Text)
- case DiffEqual:
- // A deletion and an insertion is one substitution.
- levenshtein += max(insertions, deletions)
- insertions = 0
- deletions = 0
- }
- }
-
- levenshtein += max(insertions, deletions)
- return levenshtein
-}
-
-// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
-// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation.
-func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
- var text bytes.Buffer
- for _, aDiff := range diffs {
- switch aDiff.Type {
- case DiffInsert:
- _, _ = text.WriteString("+")
- _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
- _, _ = text.WriteString("\t")
- break
- case DiffDelete:
- _, _ = text.WriteString("-")
- _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
- _, _ = text.WriteString("\t")
- break
- case DiffEqual:
- _, _ = text.WriteString("=")
- _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
- _, _ = text.WriteString("\t")
- break
- }
- }
- delta := text.String()
- if len(delta) != 0 {
- // Strip off trailing tab character.
- delta = delta[0 : utf8.RuneCountInString(delta)-1]
- delta = unescaper.Replace(delta)
- }
- return delta
-}
-
-// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
-func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { - i := 0 - - for _, token := range strings.Split(delta, "\t") { - if len(token) == 0 { - // Blank tokens are ok (from a trailing \t). - continue - } - - // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). - param := token[1:] - - switch op := token[0]; op { - case '+': - // Decode would Diff all "+" to " " - param = strings.Replace(param, "+", "%2b", -1) - param, err = url.QueryUnescape(param) - if err != nil { - return nil, err - } - if !utf8.ValidString(param) { - return nil, fmt.Errorf("invalid UTF-8 token: %q", param) - } - - diffs = append(diffs, Diff{DiffInsert, param}) - case '=', '-': - n, err := strconv.ParseInt(param, 10, 0) - if err != nil { - return nil, err - } else if n < 0 { - return nil, errors.New("Negative number in DiffFromDelta: " + param) - } - - // Remember that string slicing is by byte - we want by rune here. - text := string([]rune(text1)[i : i+int(n)]) - i += int(n) - - if op == '=' { - diffs = append(diffs, Diff{DiffEqual, text}) - } else { - diffs = append(diffs, Diff{DiffDelete, text}) - } - default: - // Anything else is an error. - return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) - } - } - - if i != len([]rune(text1)) { - return nil, fmt.Errorf("Delta length (%v) smaller than source text length (%v)", i, len(text1)) - } - - return diffs, nil -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go deleted file mode 100644 index d3acc32ce..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. -package diffmatchpatch - -import ( - "time" -) - -// DiffMatchPatch holds the configuration for diff-match-patch operations. -type DiffMatchPatch struct { - // Number of seconds to map a diff before giving up (0 for infinity). - DiffTimeout time.Duration - // Cost of an empty edit operation in terms of edit characters. - DiffEditCost int - // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). - MatchDistance int - // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. - PatchDeleteThreshold float64 - // Chunk size for context length. - PatchMargin int - // The number of bits in an int. - MatchMaxBits int - // At what point is no match declared (0.0 = perfection, 1.0 = very loose). - MatchThreshold float64 -} - -// New creates a new DiffMatchPatch object with default parameters. -func New() *DiffMatchPatch { - // Defaults. 
- return &DiffMatchPatch{ - DiffTimeout: time.Second, - DiffEditCost: 4, - MatchThreshold: 0.5, - MatchDistance: 1000, - PatchDeleteThreshold: 0.5, - PatchMargin: 4, - MatchMaxBits: 32, - } -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go deleted file mode 100644 index 17374e109..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "math" -) - -// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. -// Returns -1 if no match found. -func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { - // Check for null inputs not needed since null can't be passed in C#. - - loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) - if text == pattern { - // Shortcut (potentially not guaranteed by the algorithm) - return 0 - } else if len(text) == 0 { - // Nothing to match. - return -1 - } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { - // Perfect match at the perfect spot! (Includes case of null pattern) - return loc - } - // Do a fuzzy compare. - return dmp.MatchBitap(text, pattern, loc) -} - -// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. -// Returns -1 if no match was found. -func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { - // Initialise the alphabet. - s := dmp.MatchAlphabet(pattern) - - // Highest score beyond which we give up. - scoreThreshold := dmp.MatchThreshold - // Is there a nearby exact match? (speedup) - bestLoc := indexOf(text, pattern, loc) - if bestLoc != -1 { - scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, - pattern), scoreThreshold) - // What about in the other direction? (speedup) - bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) - if bestLoc != -1 { - scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, - pattern), scoreThreshold) - } - } - - // Initialise the bit arrays. - matchmask := 1 << uint((len(pattern) - 1)) - bestLoc = -1 - - var binMin, binMid int - binMax := len(pattern) + len(text) - lastRd := []int{} - for d := 0; d < len(pattern); d++ { - // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. - binMin = 0 - binMid = binMax - for binMin < binMid { - if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { - binMin = binMid - } else { - binMax = binMid - } - binMid = (binMax-binMin)/2 + binMin - } - // Use the result from this iteration as the maximum for the next. - binMax = binMid - start := int(math.Max(1, float64(loc-binMid+1))) - finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) - - rd := make([]int, finish+2) - rd[finish+1] = (1 << uint(d)) - 1 - - for j := finish; j >= start; j-- { - var charMatch int - if len(text) <= j-1 { - // Out of range. - charMatch = 0 - } else if _, ok := s[text[j-1]]; !ok { - charMatch = 0 - } else { - charMatch = s[text[j-1]] - } - - if d == 0 { - // First pass: exact match. 
- rd[j] = ((rd[j+1] << 1) | 1) & charMatch
- } else {
- // Subsequent passes: fuzzy match.
- rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
- }
- if (rd[j] & matchmask) != 0 {
- score := dmp.matchBitapScore(d, j-1, loc, pattern)
- // This match will almost certainly be better than any existing match. But check anyway.
- if score <= scoreThreshold {
- // Told you so.
- scoreThreshold = score
- bestLoc = j - 1
- if bestLoc > loc {
- // When passing loc, don't exceed our current distance from loc.
- start = int(math.Max(1, float64(2*loc-bestLoc)))
- } else {
- // Already passed loc, downhill from here on in.
- break
- }
- }
- }
- }
- if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
- // No hope for a (better) match at greater error levels.
- break
- }
- lastRd = rd
- }
- return bestLoc
-}
-
-// matchBitapScore computes and returns the score for a match with e errors and x location.
-func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
- accuracy := float64(e) / float64(len(pattern))
- proximity := math.Abs(float64(loc - x))
- if dmp.MatchDistance == 0 {
- // Dodge divide by zero error.
- if proximity == 0 {
- return accuracy
- }
-
- return 1.0
- }
- return accuracy + (proximity / float64(dmp.MatchDistance))
-}
-
-// MatchAlphabet initialises the alphabet for the Bitap algorithm.
-func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
- s := map[byte]int{}
- charPattern := []byte(pattern)
- for _, c := range charPattern {
- _, ok := s[c]
- if !ok {
- s[c] = 0
- }
- }
- i := 0
-
- for _, c := range charPattern {
- value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
- s[c] = value
- i++
- }
-
- return s
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
deleted file mode 100644
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
+++ /dev/null
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-func min(x, y int) int {
- if x < y {
- return x
- }
- return y
-}
-
-func max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
deleted file mode 100644
index 116c04348..000000000
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
- "bytes"
- "errors"
- "math"
- "net/url"
- "regexp"
- "strconv"
- "strings"
-)
-
-// Patch represents one patch operation.
-type Patch struct {
- diffs []Diff
- start1 int
- start2 int
- length1 int
- length2 int
-}
-
-// String emulates GNU diff's format.
-// Header: @@ -382,8 +481,9 @@
-// Indices are printed as 1-based, not 0-based.
-func (p *Patch) String() string {
- var coords1, coords2 string
-
- if p.length1 == 0 {
- coords1 = strconv.Itoa(p.start1) + ",0"
- } else if p.length1 == 1 {
- coords1 = strconv.Itoa(p.start1 + 1)
- } else {
- coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1)
- }
-
- if p.length2 == 0 {
- coords2 = strconv.Itoa(p.start2) + ",0"
- } else if p.length2 == 1 {
- coords2 = strconv.Itoa(p.start2 + 1)
- } else {
- coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2)
- }
-
- var text bytes.Buffer
- _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
-
- // Escape the body of the patch with %xx notation.
- for _, aDiff := range p.diffs { - switch aDiff.Type { - case DiffInsert: - _, _ = text.WriteString("+") - case DiffDelete: - _, _ = text.WriteString("-") - case DiffEqual: - _, _ = text.WriteString(" ") - } - - _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) - _, _ = text.WriteString("\n") - } - - return unescaper.Replace(text.String()) -} - -// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. -func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { - if len(text) == 0 { - return patch - } - - pattern := text[patch.start2 : patch.start2+patch.length1] - padding := 0 - - // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. - for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && - len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { - padding += dmp.PatchMargin - maxStart := max(0, patch.start2-padding) - minEnd := min(len(text), patch.start2+patch.length1+padding) - pattern = text[maxStart:minEnd] - } - // Add one chunk for good luck. - padding += dmp.PatchMargin - - // Add the prefix. - prefix := text[max(0, patch.start2-padding):patch.start2] - if len(prefix) != 0 { - patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) - } - // Add the suffix. - suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] - if len(suffix) != 0 { - patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) - } - - // Roll back the start points. - patch.start1 -= len(prefix) - patch.start2 -= len(prefix) - // Extend the lengths. - patch.length1 += len(prefix) + len(suffix) - patch.length2 += len(prefix) + len(suffix) - - return patch -} - -// PatchMake computes a list of patches. -func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { - if len(opt) == 1 { - diffs, _ := opt[0].([]Diff) - text1 := dmp.DiffText1(diffs) - return dmp.PatchMake(text1, diffs) - } else if len(opt) == 2 { - text1 := opt[0].(string) - switch t := opt[1].(type) { - case string: - diffs := dmp.DiffMain(text1, t, true) - if len(diffs) > 2 { - diffs = dmp.DiffCleanupSemantic(diffs) - diffs = dmp.DiffCleanupEfficiency(diffs) - } - return dmp.PatchMake(text1, diffs) - case []Diff: - return dmp.patchMake2(text1, t) - } - } else if len(opt) == 3 { - return dmp.PatchMake(opt[0], opt[2]) - } - return []Patch{} -} - -// patchMake2 computes a list of patches to turn text1 into text2. -// text2 is not provided, diffs are the delta between text1 and text2. -func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { - // Check for null inputs not needed since null can't be passed in C#. - patches := []Patch{} - if len(diffs) == 0 { - return patches // Get rid of the null case. - } - - patch := Patch{} - charCount1 := 0 // Number of characters into the text1 string. - charCount2 := 0 // Number of characters into the text2 string. - // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. - prepatchText := text1 - postpatchText := text1 - - for i, aDiff := range diffs { - if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { - // A new patch starts here. 
- patch.start1 = charCount1
- patch.start2 = charCount2
- }
-
- switch aDiff.Type {
- case DiffInsert:
- patch.diffs = append(patch.diffs, aDiff)
- patch.length2 += len(aDiff.Text)
- postpatchText = postpatchText[:charCount2] +
- aDiff.Text + postpatchText[charCount2:]
- case DiffDelete:
- patch.length1 += len(aDiff.Text)
- patch.diffs = append(patch.diffs, aDiff)
- postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
- case DiffEqual:
- if len(aDiff.Text) <= 2*dmp.PatchMargin &&
- len(patch.diffs) != 0 && i != len(diffs)-1 {
- // Small equality inside a patch.
- patch.diffs = append(patch.diffs, aDiff)
- patch.length1 += len(aDiff.Text)
- patch.length2 += len(aDiff.Text)
- }
- if len(aDiff.Text) >= 2*dmp.PatchMargin {
- // Time for a new patch.
- if len(patch.diffs) != 0 {
- patch = dmp.PatchAddContext(patch, prepatchText)
- patches = append(patches, patch)
- patch = Patch{}
- // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
- prepatchText = postpatchText
- charCount1 = charCount2
- }
- }
- }
-
- // Update the current character count.
- if aDiff.Type != DiffInsert {
- charCount1 += len(aDiff.Text)
- }
- if aDiff.Type != DiffDelete {
- charCount2 += len(aDiff.Text)
- }
- }
-
- // Pick up the leftover patch if not empty.
- if len(patch.diffs) != 0 {
- patch = dmp.PatchAddContext(patch, prepatchText)
- patches = append(patches, patch)
- }
-
- return patches
-}
-
-// PatchDeepCopy returns an array that is identical to a given array of patches.
-func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
- patchesCopy := []Patch{}
- for _, aPatch := range patches {
- patchCopy := Patch{}
- for _, aDiff := range aPatch.diffs {
- patchCopy.diffs = append(patchCopy.diffs, Diff{
- aDiff.Type,
- aDiff.Text,
- })
- }
- patchCopy.start1 = aPatch.start1
- patchCopy.start2 = aPatch.start2
- patchCopy.length1 = aPatch.length1
- patchCopy.length2 = aPatch.length2
- patchesCopy = append(patchesCopy, patchCopy)
- }
- return patchesCopy
-}
-
-// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied.
-func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
- if len(patches) == 0 {
- return text, []bool{}
- }
-
- // Deep copy the patches so that no changes are made to originals.
- patches = dmp.PatchDeepCopy(patches)
-
- nullPadding := dmp.PatchAddPadding(patches)
- text = nullPadding + text + nullPadding
- patches = dmp.PatchSplitMax(patches)
-
- x := 0
- // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
- delta := 0
- results := make([]bool, len(patches))
- for _, aPatch := range patches {
- expectedLoc := aPatch.start2 + delta
- text1 := dmp.DiffText1(aPatch.diffs)
- var startLoc int
- endLoc := -1
- if len(text1) > dmp.MatchMaxBits {
- // PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
- startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) - if startLoc != -1 { - endLoc = dmp.MatchMain(text, - text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) - if endLoc == -1 || startLoc >= endLoc { - // Can't find valid trailing context. Drop this patch. - startLoc = -1 - } - } - } else { - startLoc = dmp.MatchMain(text, text1, expectedLoc) - } - if startLoc == -1 { - // No match found. :( - results[x] = false - // Subtract the delta for this failed patch from subsequent patches. - delta -= aPatch.length2 - aPatch.length1 - } else { - // Found a match. :) - results[x] = true - delta = startLoc - expectedLoc - var text2 string - if endLoc == -1 { - text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] - } else { - text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] - } - if text1 == text2 { - // Perfect match, just shove the Replacement text in. - text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] - } else { - // Imperfect match. Run a diff to get a framework of equivalent indices. - diffs := dmp.DiffMain(text1, text2, false) - if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { - // The end points match, but the content is unacceptably bad. - results[x] = false - } else { - diffs = dmp.DiffCleanupSemanticLossless(diffs) - index1 := 0 - for _, aDiff := range aPatch.diffs { - if aDiff.Type != DiffEqual { - index2 := dmp.DiffXIndex(diffs, index1) - if aDiff.Type == DiffInsert { - // Insertion - text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] - } else if aDiff.Type == DiffDelete { - // Deletion - startIndex := startLoc + index2 - text = text[:startIndex] + - text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] - } - } - if aDiff.Type != DiffDelete { - index1 += len(aDiff.Text) - } - } - } - } - } - x++ - } - // Strip the padding off. - text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] - return text, results -} - -// PatchAddPadding adds some padding on text start and end so that edges can match something. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { - paddingLength := dmp.PatchMargin - nullPadding := "" - for x := 1; x <= paddingLength; x++ { - nullPadding += string(x) - } - - // Bump all the patches forward. - for i := range patches { - patches[i].start1 += paddingLength - patches[i].start2 += paddingLength - } - - // Add some padding on start of first diff. - if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { - // Add nullPadding equality. - patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) - patches[0].start1 -= paddingLength // Should be 0. - patches[0].start2 -= paddingLength // Should be 0. - patches[0].length1 += paddingLength - patches[0].length2 += paddingLength - } else if paddingLength > len(patches[0].diffs[0].Text) { - // Grow first equality. - extraLength := paddingLength - len(patches[0].diffs[0].Text) - patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text - patches[0].start1 -= extraLength - patches[0].start2 -= extraLength - patches[0].length1 += extraLength - patches[0].length2 += extraLength - } - - // Add some padding on end of last diff. 
- last := len(patches) - 1 - if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { - // Add nullPadding equality. - patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) - patches[last].length1 += paddingLength - patches[last].length2 += paddingLength - } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { - // Grow last equality. - lastDiff := patches[last].diffs[len(patches[last].diffs)-1] - extraLength := paddingLength - len(lastDiff.Text) - patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] - patches[last].length1 += extraLength - patches[last].length2 += extraLength - } - - return nullPadding -} - -// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { - patchSize := dmp.MatchMaxBits - for x := 0; x < len(patches); x++ { - if patches[x].length1 <= patchSize { - continue - } - bigpatch := patches[x] - // Remove the big old patch. - patches = append(patches[:x], patches[x+1:]...) - x-- - - start1 := bigpatch.start1 - start2 := bigpatch.start2 - precontext := "" - for len(bigpatch.diffs) != 0 { - // Create one of several smaller patches. - patch := Patch{} - empty := true - patch.start1 = start1 - len(precontext) - patch.start2 = start2 - len(precontext) - if len(precontext) != 0 { - patch.length1 = len(precontext) - patch.length2 = len(precontext) - patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) - } - for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { - diffType := bigpatch.diffs[0].Type - diffText := bigpatch.diffs[0].Text - if diffType == DiffInsert { - // Insertions are harmless. - patch.length2 += len(diffText) - start2 += len(diffText) - patch.diffs = append(patch.diffs, bigpatch.diffs[0]) - bigpatch.diffs = bigpatch.diffs[1:] - empty = false - } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { - // This is a large deletion. Let it pass in one chunk. - patch.length1 += len(diffText) - start1 += len(diffText) - empty = false - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - bigpatch.diffs = bigpatch.diffs[1:] - } else { - // Deletion or equality. Only take as much as we can stomach. - diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] - - patch.length1 += len(diffText) - start1 += len(diffText) - if diffType == DiffEqual { - patch.length2 += len(diffText) - start2 += len(diffText) - } else { - empty = false - } - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - if diffText == bigpatch.diffs[0].Text { - bigpatch.diffs = bigpatch.diffs[1:] - } else { - bigpatch.diffs[0].Text = - bigpatch.diffs[0].Text[len(diffText):] - } - } - } - // Compute the head context for the next patch. - precontext = dmp.DiffText2(patch.diffs) - precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] - - postcontext := "" - // Append the end context for this patch. 
- if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { - postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] - } else { - postcontext = dmp.DiffText1(bigpatch.diffs) - } - - if len(postcontext) != 0 { - patch.length1 += len(postcontext) - patch.length2 += len(postcontext) - if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { - patch.diffs[len(patch.diffs)-1].Text += postcontext - } else { - patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) - } - } - if !empty { - x++ - patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) - } - } - } - return patches -} - -// PatchToText takes a list of patches and returns a textual representation. -func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { - var text bytes.Buffer - for _, aPatch := range patches { - _, _ = text.WriteString(aPatch.String()) - } - return text.String() -} - -// PatchFromText parses a textual representation of patches and returns a List of Patch objects. -func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { - patches := []Patch{} - if len(textline) == 0 { - return patches, nil - } - text := strings.Split(textline, "\n") - textPointer := 0 - patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") - - var patch Patch - var sign uint8 - var line string - for textPointer < len(text) { - - if !patchHeader.MatchString(text[textPointer]) { - return patches, errors.New("Invalid patch string: " + text[textPointer]) - } - - patch = Patch{} - m := patchHeader.FindStringSubmatch(text[textPointer]) - - patch.start1, _ = strconv.Atoi(m[1]) - if len(m[2]) == 0 { - patch.start1-- - patch.length1 = 1 - } else if m[2] == "0" { - patch.length1 = 0 - } else { - patch.start1-- - patch.length1, _ = strconv.Atoi(m[2]) - } - - patch.start2, _ = strconv.Atoi(m[3]) - - if len(m[4]) == 0 { - patch.start2-- - patch.length2 = 1 - } else if m[4] == "0" { - patch.length2 = 0 - } else { - patch.start2-- - patch.length2, _ = strconv.Atoi(m[4]) - } - textPointer++ - - for textPointer < len(text) { - if len(text[textPointer]) > 0 { - sign = text[textPointer][0] - } else { - textPointer++ - continue - } - - line = text[textPointer][1:] - line = strings.Replace(line, "+", "%2b", -1) - line, _ = url.QueryUnescape(line) - if sign == '-' { - // Deletion. - patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) - } else if sign == '+' { - // Insertion. - patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) - } else if sign == ' ' { - // Minor equality. - patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) - } else if sign == '@' { - // Start of next patch. - break - } else { - // WTF? - return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) - } - textPointer++ - } - - patches = append(patches, patch) - } - return patches, nil -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go deleted file mode 100644 index 265f29cc7..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. 
-// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "strings" - "unicode/utf8" -) - -// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. -// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. -var unescaper = strings.NewReplacer( - "%21", "!", "%7E", "~", "%27", "'", - "%28", "(", "%29", ")", "%3B", ";", - "%2F", "/", "%3F", "?", "%3A", ":", - "%40", "@", "%26", "&", "%3D", "=", - "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") - -// indexOf returns the first index of pattern in str, starting at str[i]. -func indexOf(str string, pattern string, i int) int { - if i > len(str)-1 { - return -1 - } - if i <= 0 { - return strings.Index(str, pattern) - } - ind := strings.Index(str[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -// lastIndexOf returns the last index of pattern in str, starting at str[i]. -func lastIndexOf(str string, pattern string, i int) int { - if i < 0 { - return -1 - } - if i >= len(str) { - return strings.LastIndex(str, pattern) - } - _, size := utf8.DecodeRuneInString(str[i:]) - return strings.LastIndex(str[:i+size], pattern) -} - -// runesIndexOf returns the index of pattern in target, starting at target[i]. -func runesIndexOf(target, pattern []rune, i int) int { - if i > len(target)-1 { - return -1 - } - if i <= 0 { - return runesIndex(target, pattern) - } - ind := runesIndex(target[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -func runesEqual(r1, r2 []rune) bool { - if len(r1) != len(r2) { - return false - } - for i, c := range r1 { - if c != r2[i] { - return false - } - } - return true -} - -// runesIndex is the equivalent of strings.Index for rune slices. -func runesIndex(r1, r2 []rune) int { - last := len(r1) - len(r2) - for i := 0; i <= last; i++ { - if runesEqual(r1[i:i+len(r2)], r2) { - return i - } - } - return -1 -} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc52579..000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
- apt-get upgrade -y && \
- apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
- autotools-dev libtool pkg-config zlib1g-dev \
- libcunit1-dev libssl-dev libxml2-dev libevent-dev \
- automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
- autoconf automake autotools-dev \
- libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
- libev-dev libevent-dev libjansson-dev libjemalloc-dev \
- cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
deleted file mode 100644
index 55fd826f7..000000000
--- a/vendor/golang.org/x/net/http2/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-curlimage:
- docker build -t gohttp2/curl .
-
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
deleted file mode 100644
index 360d5aa37..000000000
--- a/vendor/golang.org/x/net/http2/README
+++ /dev/null
@@ -1,20 +0,0 @@
-This is a work-in-progress HTTP/2 implementation for Go.
-
-It will eventually live in the Go standard library and won't require
-any changes to your code to use. It will just be automatic.
-
-Status:
-
-* The server support is pretty good. A few things are missing
- but are being worked on.
-* The client work has just started but shares a lot of code
- and is coming along much quicker.
-
-Docs are at https://godoc.org/golang.org/x/net/http2
-
-Demo test server at https://http2.golang.org/
-
-Help & bug reports welcome!
-
-Contributing: https://golang.org/doc/contribute.html
-Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
deleted file mode 100644
index b13941258..000000000
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Transport code's client connection pooling.
-
-package http2
-
-import (
- "crypto/tls"
- "net/http"
- "sync"
-)
-
-// ClientConnPool manages a pool of HTTP/2 client connections.
-type ClientConnPool interface {
- GetClientConn(req *http.Request, addr string) (*ClientConn, error)
- MarkDead(*ClientConn)
-}
-
-// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
-// implementations which can close their idle connections.
-type clientConnPoolIdleCloser interface {
- ClientConnPool
- closeIdleConnections()
-}
-
-var (
- _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
- _ clientConnPoolIdleCloser = noDialClientConnPool{}
-)
-
-// TODO: use singleflight for dialing and addConnCalls?
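For orientation before the pool type that follows: the deleted clientConnPool keys live connections by host:port and either reuses a connection that can still take requests or starts a single coalesced dial per address. A minimal standalone sketch of that reuse-or-dial pattern (hostPool and conn are illustrative names, not x/net types):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type conn struct{ addr string }

    // canTakeNewRequest stands in for ClientConn.CanTakeNewRequest.
    func (c *conn) canTakeNewRequest() bool { return true }

    // hostPool reuses an existing conn per address or "dials" a new one,
    // mirroring the lookup in clientConnPool.getClientConn below.
    type hostPool struct {
    	mu    sync.Mutex
    	conns map[string][]*conn
    }

    func (p *hostPool) get(addr string) *conn {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	for _, c := range p.conns[addr] {
    		if c.canTakeNewRequest() {
    			return c // reuse a pooled connection
    		}
    	}
    	c := &conn{addr: addr} // dial on miss
    	if p.conns == nil {
    		p.conns = make(map[string][]*conn)
    	}
    	p.conns[addr] = append(p.conns[addr], c)
    	return c
    }

    func main() {
    	p := &hostPool{}
    	a := p.get("example.com:443")
    	b := p.get("example.com:443")
    	fmt.Println(a == b) // true: the second request reuses the pooled conn
    }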
-type clientConnPool struct {
- t *Transport
-
- mu sync.Mutex // TODO: maybe switch to RWMutex
- // TODO: add support for sharing conns based on cert names
- // (e.g. share conn for googleapis.com and appspot.com)
- conns map[string][]*ClientConn // key is host:port
- dialing map[string]*dialCall // currently in-flight dials
- keys map[*ClientConn][]string
- addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
-}
-
-func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
- return p.getClientConn(req, addr, dialOnMiss)
-}
-
-const (
- dialOnMiss = true
- noDialOnMiss = false
-)
-
-func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
- if isConnectionCloseRequest(req) && dialOnMiss {
- // It gets its own connection.
- const singleUse = true
- cc, err := p.t.dialClientConn(addr, singleUse)
- if err != nil {
- return nil, err
- }
- return cc, nil
- }
- p.mu.Lock()
- for _, cc := range p.conns[addr] {
- if cc.CanTakeNewRequest() {
- p.mu.Unlock()
- return cc, nil
- }
- }
- if !dialOnMiss {
- p.mu.Unlock()
- return nil, ErrNoCachedConn
- }
- call := p.getStartDialLocked(addr)
- p.mu.Unlock()
- <-call.done
- return call.res, call.err
-}
-
-// dialCall is an in-flight Transport dial call to a host.
-type dialCall struct {
- p *clientConnPool
- done chan struct{} // closed when done
- res *ClientConn // valid after done is closed
- err error // valid after done is closed
-}
-
-// requires p.mu is held.
-func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
- if call, ok := p.dialing[addr]; ok {
- // A dial is already in-flight. Don't start another.
- return call
- }
- call := &dialCall{p: p, done: make(chan struct{})}
- if p.dialing == nil {
- p.dialing = make(map[string]*dialCall)
- }
- p.dialing[addr] = call
- go call.dial(addr)
- return call
-}
-
-// run in its own goroutine.
-func (c *dialCall) dial(addr string) {
- const singleUse = false // shared conn
- c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
- close(c.done)
-
- c.p.mu.Lock()
- delete(c.p.dialing, addr)
- if c.err == nil {
- c.p.addConnLocked(addr, c.res)
- }
- c.p.mu.Unlock()
-}
-
-// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
-// already exist. It coalesces concurrent calls with the same key.
-// This is used by the http1 Transport code when it creates a new connection. Because
-// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
-// the protocol), it can get into a situation where it has multiple TLS connections.
-// This code decides which ones live or die.
-// The return value reports whether c was used.
-// c is never closed.
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 4f720f530..000000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
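The configure_transport.go file deleted below is the ALPN glue that lets a standard net/http Transport negotiate HTTP/2. The same wiring is reachable through the exported http2.ConfigureTransport helper, so a short usage sketch against the public API (example.com is a placeholder host):

    package main

    import (
    	"log"
    	"net/http"

    	"golang.org/x/net/http2"
    )

    func main() {
    	t := &http.Transport{}
    	// ConfigureTransport registers an "h2" ALPN upgrade on t, so HTTPS
    	// requests negotiate HTTP/2 when the server supports it.
    	if err := http2.ConfigureTransport(t); err != nil {
    		log.Fatal(err)
    	}
    	c := &http.Client{Transport: t}
    	resp, err := c.Get("https://example.com/")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer resp.Body.Close()
    	log.Println(resp.Proto) // "HTTP/2.0" when negotiated
    }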
-
-// +build go1.6
-
-package http2
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
-)
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- connPool := new(clientConnPool)
- t2 := &Transport{
- ConnPool: noDialClientConnPool{connPool},
- t1: t1,
- }
- connPool.t = t2
- if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
- return nil, err
- }
- if t1.TLSClientConfig == nil {
- t1.TLSClientConfig = new(tls.Config)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
- t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
- t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
- }
- upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
- addr := authorityAddr("https", authority)
- if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
- go c.Close()
- return erringRoundTripper{err}
- } else if !used {
- // Turns out we don't need this c.
- // For example, two goroutines made requests to the same host
- // at the same time, both kicking off TCP dials. (since protocol
- // was unknown)
- go c.Close()
- }
- return t2
- }
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
- "h2": upgradeFn,
- }
- } else {
- m["h2"] = upgradeFn
- }
- return t2, nil
-}
-
-// registerHTTPSProtocol calls Transport.RegisterProtocol but
-// converting panics into errors.
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("%v", e)
- }
- }()
- t.RegisterProtocol("https", rt)
- return nil
-}
-
-// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already a cached connection to the host.
-type noDialH2RoundTripper struct{ t *Transport }
-
-func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- res, err := rt.t.RoundTrip(req)
- if err == ErrNoCachedConn {
- return nil, http.ErrSkipAltProtocol
- }
- return res, err
-}
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
deleted file mode 100644
index 20fd7626a..000000000
--- a/vendor/golang.org/x/net/http2/errors.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
- "fmt"
-)
-
-// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
-type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." -type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). 
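The error types deleted above encode HTTP/2's two failure scopes: a StreamError resets a single stream, while a ConnectionError tears down the whole connection with GOAWAY. A brief sketch of telling them apart through the exported types (the classify helper is illustrative, not part of the package):

    package main

    import (
    	"fmt"

    	"golang.org/x/net/http2"
    )

    // classify maps a framer error to the action an endpoint would take.
    func classify(err error) string {
    	switch e := err.(type) {
    	case http2.StreamError:
    		return fmt.Sprintf("reset stream %d with %v", e.StreamID, e.Code)
    	case http2.ConnectionError:
    		return fmt.Sprintf("send GOAWAY with %v", http2.ErrCode(e))
    	default:
    		return "unknown error"
    	}
    }

    func main() {
    	fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}))
    	fmt.Println(classify(http2.ConnectionError(http2.ErrCodeFlowControl)))
    }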
-type connError struct { - Code ErrCode - Reason string -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf..000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index 957de2542..000000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. 
- conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false - } - f.n += n - return true -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index 358833fed..000000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1544 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. 
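The flow struct deleted just above is the whole of the transport's flow-control bookkeeping: a per-stream window optionally capped by a shared connection-level window, where sending consumes both. A self-contained sketch of the same arithmetic (window is an illustrative stand-in for the unexported flow type; bounds checks elided):

    package main

    import "fmt"

    // window mirrors flow: n bytes available, optionally capped by conn.
    type window struct {
    	n    int32
    	conn *window
    }

    func (w *window) available() int32 {
    	if w.conn != nil && w.conn.n < w.n {
    		return w.conn.n
    	}
    	return w.n
    }

    // take consumes n bytes from the stream window and the shared conn window.
    func (w *window) take(n int32) {
    	w.n -= n
    	if w.conn != nil {
    		w.conn.n -= n
    	}
    }

    func main() {
    	conn := &window{n: 100}
    	stream := &window{n: 1000, conn: conn}
    	fmt.Println(stream.available()) // 100: capped by the connection window
    	stream.take(60)
    	fmt.Println(stream.available()) // 40
    }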
-const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. -// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. 
-func (h FrameHeader) Header() FrameHeader { return h }
-
-func (h FrameHeader) String() string {
- var buf bytes.Buffer
- buf.WriteString("[FrameHeader ")
- h.writeDebug(&buf)
- buf.WriteByte(']')
- return buf.String()
-}
-
-func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
- buf.WriteString(h.Type.String())
- if h.Flags != 0 {
- buf.WriteString(" flags=")
- set := 0
- for i := uint8(0); i < 8; i++ {
- if h.Flags&(1<<i) == 0 {
- continue
- }
- set++
- if set > 1 {
- buf.WriteByte('|')
- }
- name := flagName[h.Type][Flags(1<<i)]
- if name != "" {
- buf.WriteString(name)
- } else {
- fmt.Fprintf(buf, "0x%x", 1<<i)
- }
- }
- }
- if h.StreamID != 0 {
- fmt.Fprintf(buf, " stream=%d", h.StreamID)
- }
- fmt.Fprintf(buf, " len=%d", h.Length)
-}
-
-func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
- // Write the FrameHeader.
- f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
- 0,
- 0,
- byte(ftype),
- byte(flags),
- byte(streamID>>24),
- byte(streamID>>16),
- byte(streamID>>8),
- byte(streamID))
-}
-
-func (f *Framer) endWrite() error {
- // Now that we know the final size, fill in the FrameHeader in
- // the space previously reserved for it. Abuse append.
- length := len(f.wbuf) - frameHeaderLen
- if length >= (1 << 24) {
- return ErrFrameTooLarge
- }
- _ = append(f.wbuf[:0],
- byte(length>>16),
- byte(length>>8),
- byte(length))
- if f.logWrites {
- f.logWrite()
- }
-
- n, err := f.w.Write(f.wbuf)
- if err == nil && n != len(f.wbuf) {
- err = io.ErrShortWrite
- }
- return err
-}
-
-func (f *Framer) logWrite() {
- if f.debugFramer == nil {
- f.debugFramerBuf = new(bytes.Buffer)
- f.debugFramer = NewFramer(nil, f.debugFramerBuf)
- f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
- // Let us read anything, even if we accidentally wrote it
- // in the wrong order:
- f.debugFramer.AllowIllegalReads = true
- }
- f.debugFramerBuf.Write(f.wbuf)
- fr, err := f.debugFramer.ReadFrame()
- if err != nil {
- f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
- return
- }
- f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
-}
-
-func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
-func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
-func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
-func (f *Framer) writeUint32(v uint32) {
- f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-const (
- minMaxFrameSize = 1 << 14
- maxFrameSize = 1<<24 - 1
-)
-
-// NewFramer returns a Framer that writes frames to w and reads them from r.
-func NewFramer(w io.Writer, r io.Reader) *Framer {
- fr := &Framer{
- w: w,
- r: r,
- logReads: logFrameReads,
- logWrites: logFrameWrites,
- debugReadLoggerf: log.Printf,
- debugWriteLoggerf: log.Printf,
- }
- fr.getReadBuf = func(size uint32) []byte {
- if cap(fr.readBuf) >= int(size) {
- return fr.readBuf[:size]
- }
- fr.readBuf = make([]byte, size)
- return fr.readBuf
- }
- fr.SetMaxReadFrameSize(maxFrameSize)
- return fr
-}
-
-// SetMaxReadFrameSize sets the maximum size of a frame
-// that will be read by a subsequent call to ReadFrame.
-// It is the caller's responsibility to advertise this
-// limit with a SETTINGS frame.
-func (fr *Framer) SetMaxReadFrameSize(v uint32) {
- if v > maxFrameSize {
- v = maxFrameSize
- }
- fr.maxReadSize = v
-}
-
-// ErrorDetail returns a more detailed error of the last error
-// returned by Framer.ReadFrame. For instance, if ReadFrame
-// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
-// will say exactly what was invalid. ErrorDetail is not guaranteed
-// to return a non-nil value and like the rest of the http2 package,
-// its return value is not protected by an API compatibility promise.
-// ErrorDetail is reset after the next call to ReadFrame.
-func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. -var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. -func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason to the caller can optionally relay it -// to the peer before hanging up on them. This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream. 
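A note on padding, since parseDataFrame below has to strip it: a padded DATA payload carries a one-byte pad length up front, followed by the data, followed by that many zero bytes. A standalone sketch of the arithmetic (the variable names are illustrative, not from this package):

    package main

    import "fmt"

    func main() {
    	// A padded DATA payload: pad-length byte, data, then that many zeros.
    	payload := []byte{3, 'h', 'i', 0, 0, 0}
    	padLen := int(payload[0])
    	body := payload[1:]
    	fmt.Printf("%q\n", body[:len(body)-padLen]) // "hi"
    }

parseDataFrame rejects the case where the pad length exceeds the remaining payload, which would otherwise make that slice expression panic.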
-// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := &DataFrame{ - FrameHeader: fh, - } - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteData writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 255 { - return errPadLength - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. 
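Each entry in a SETTINGS payload is a fixed six-byte record: a 16-bit setting identifier followed by a 32-bit value, which is why parseSettingsFrame below rejects payloads whose length is not a multiple of six. A minimal sketch of that layout, assuming nothing beyond encoding/binary (appendSetting is an illustrative helper, not part of this package):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // appendSetting encodes one SETTINGS entry: ID (16 bits), value (32 bits).
    func appendSetting(dst []byte, id uint16, val uint32) []byte {
    	var rec [6]byte
    	binary.BigEndian.PutUint16(rec[:2], id)
    	binary.BigEndian.PutUint32(rec[2:], val)
    	return append(dst, rec[:]...)
    }

    func main() {
    	// SETTINGS_INITIAL_WINDOW_SIZE (0x4) = 65535, per RFC 7540 section 6.5.2.
    	fmt.Printf("% x\n", appendSetting(nil, 0x4, 65535)) // 00 04 00 00 ff ff
    }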
-// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - return nil, ConnectionError(ErrCodeFlowControl) - } - return f, nil -} - -func (f *SettingsFrame) IsAck() bool { - return f.FrameHeader.Flags.Has(FlagSettingsAck) -} - -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { - f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true - } - buf = buf[6:] - } - return 0, false -} - -// ForeachSetting runs fn for each setting. -// It stops and returns the first error. -func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { - f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { - return err - } - buf = buf[6:] - } - return nil -} - -// WriteSettings writes a SETTINGS frame with zero or more settings -// specified and the ACK bit not set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettings(settings ...Setting) error { - f.startWrite(FrameSettings, 0, 0) - for _, s := range settings { - f.writeUint16(uint16(s.ID)) - f.writeUint32(s.Val) - } - return f.endWrite() -} - -// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettingsAck() error { - f.startWrite(FrameSettings, FlagSettingsAck, 0) - return f.endWrite() -} - -// A PingFrame is a mechanism for measuring a minimal round trip time -// from the sender, as well as determining whether an idle connection -// is still functional. 
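The eight bytes of a PING are opaque: whatever the sender writes, the peer echoes back with the ACK flag set, which is what makes the frame usable for RTT measurement. A self-contained sketch round-tripping a PING through a Framer over an in-memory buffer (no network; purely illustrative):

    package main

    import (
    	"bytes"
    	"fmt"

    	"golang.org/x/net/http2"
    )

    func main() {
    	var buf bytes.Buffer
    	fr := http2.NewFramer(&buf, &buf) // write to and read from the same buffer

    	// A sender would typically stash a timestamp or counter here and
    	// match it against the echoed ACK to compute a round trip time.
    	data := [8]byte{'f', 'a', 'b', 'i', 'o', 0, 0, 1}
    	if err := fr.WritePing(false, data); err != nil {
    		panic(err)
    	}

    	f, err := fr.ReadFrame()
    	if err != nil {
    		panic(err)
    	}
    	pf := f.(*http2.PingFrame)
    	fmt.Println(pf.IsAck(), pf.Data == data) // false true
    }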
-// See http://http2.github.io/http2-spec/#rfc.section.6.7 -type PingFrame struct { - FrameHeader - Data [8]byte -} - -func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } - -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { - if len(payload) != 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - f := &PingFrame{FrameHeader: fh} - copy(f.Data[:], payload) - return f, nil -} - -func (f *Framer) WritePing(ack bool, data [8]byte) error { - var flags Flags - if ack { - flags = FlagPingAck - } - f.startWrite(FramePing, flags, 0) - f.writeBytes(data[:]) - return f.endWrite() -} - -// A GoAwayFrame informs the remote peer to stop creating streams on this connection. -// See http://http2.github.io/http2-spec/#rfc.section.6.8 -type GoAwayFrame struct { - FrameHeader - LastStreamID uint32 - ErrCode ErrCode - debugData []byte -} - -// DebugData returns any debug data in the GOAWAY frame. Its contents -// are not defined. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *GoAwayFrame) DebugData() []byte { - f.checkValid() - return f.debugData -} - -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p) < 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - return &GoAwayFrame{ - FrameHeader: fh, - LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), - ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), - debugData: p[8:], - }, nil -} - -func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { - f.startWrite(FrameGoAway, 0, 0) - f.writeUint32(maxStreamID & (1<<31 - 1)) - f.writeUint32(uint32(code)) - f.writeBytes(debugData) - return f.endWrite() -} - -// An UnknownFrame is the frame type returned when the frame type is unknown -// or no specific frame type parser exists. -type UnknownFrame struct { - FrameHeader - p []byte -} - -// Payload returns the frame's payload (after the header). It is not -// valid to call this method after a subsequent call to -// Framer.ReadFrame, nor is it valid to retain the returned slice. -// The memory is owned by the Framer and is invalidated when the next -// frame is read. -func (f *UnknownFrame) Payload() []byte { - f.checkValid() - return f.p -} - -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { - return &UnknownFrame{fh, p}, nil -} - -// A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 -type WindowUpdateFrame struct { - FrameHeader - Increment uint32 // never read with high bit set -} - -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit - if inc == 0 { - // A receiver MUST treat the receipt of a - // WINDOW_UPDATE frame with an flow control window - // increment of 0 as a stream error (Section 5.4.2) of - // type PROTOCOL_ERROR; errors on the connection flow - // control window MUST be treated as a connection - // error (Section 5.4.1). - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - return &WindowUpdateFrame{ - FrameHeader: fh, - Increment: inc, - }, nil -} - -// WriteWindowUpdate writes a WINDOW_UPDATE frame. 
-// The increment value must be between 1 and 2,147,483,647, inclusive. -// If the Stream ID is zero, the window update applies to the -// connection as a whole. -func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { - // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." - if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { - return errors.New("illegal window increment value") - } - f.startWrite(FrameWindowUpdate, 0, streamID) - f.writeUint32(incr) - return f.endWrite() -} - -// A HeadersFrame is used to open a stream and additionally carries a -// header block fragment. -type HeadersFrame struct { - FrameHeader - - // Priority is set if FlagHeadersPriority is set in the FrameHeader. - Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. -// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. 
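WriteHeaders, which follows, packs an optional priority block the same way WritePriority does: the exclusive flag rides in the high bit of the 32-bit dependency word, above the 31-bit dependent stream ID, and the decoder recovers it by checking whether masking the high bit off changed the value. A worked sketch of both directions (standalone, with illustrative names):

    package main

    import "fmt"

    func main() {
    	const dep = uint32(3) // 31-bit dependent stream ID

    	// Encode: set the high (exclusive) bit on top of the dependency.
    	v := dep | 1<<31

    	// Decode: mask the reserved high bit back off, as the parsers above do.
    	streamDep := v & 0x7fffffff
    	exclusive := streamDep != v // true iff the high bit was set

    	fmt.Println(streamDep, exclusive) // 3 true
    }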
-func (f *Framer) WriteHeaders(p HeadersFrameParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagHeadersPadded - } - if p.EndStream { - flags |= FlagHeadersEndStream - } - if p.EndHeaders { - flags |= FlagHeadersEndHeaders - } - if !p.Priority.IsZero() { - flags |= FlagHeadersPriority - } - f.startWrite(FrameHeaders, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !p.Priority.IsZero() { - v := p.Priority.StreamDep - if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { - return errDepStreamID - } - if p.Priority.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Priority.Weight) - } - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 -type PriorityFrame struct { - FrameHeader - PriorityParam -} - -// PriorityParam are the stream prioritzation parameters. -type PriorityParam struct { - // StreamDep is a 31-bit stream identifier for the - // stream that this stream depends on. Zero means no - // dependency. - StreamDep uint32 - - // Exclusive is whether the dependency is exclusive. - Exclusive bool - - // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per - // the spec, "Add one to the value to obtain a weight between - // 1 and 256." - Weight uint8 -} - -func (p PriorityParam) IsZero() bool { - return p == PriorityParam{} -} - -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} - } - if len(payload) != 5 { - return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} - } - v := binary.BigEndian.Uint32(payload[:4]) - streamID := v & 0x7fffffff // mask off high bit - return &PriorityFrame{ - FrameHeader: fh, - PriorityParam: PriorityParam{ - Weight: payload[4], - StreamDep: streamID, - Exclusive: streamID != v, // was high bit set? - }, - }, nil -} - -// WritePriority writes a PRIORITY frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if !validStreamIDOrZero(p.StreamDep) { - return errDepStreamID - } - f.startWrite(FramePriority, 0, streamID) - v := p.StreamDep - if p.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Weight) - return f.endWrite() -} - -// A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 -type RSTStreamFrame struct { - FrameHeader - ErrCode ErrCode -} - -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil -} - -// WriteRSTStream writes a RST_STREAM frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. 
-func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) - return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. - // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. 
- BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. -func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. - // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. 
-// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. - for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if VerboseLogs && fr.logReads { - fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) - } - if !httplex.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validWireHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} - } - 
if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - if VerboseLogs { - log.Printf("http2: invalid pseudo headers: %v", err) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf bytes.Buffer - f.Header().writeDebug(&buf) - switch f := f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" (conn)") - } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 2b72855f5..000000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index 47b7fae08..000000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req *http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go deleted file mode 100644 index b4c52ecec..000000000 --- a/vendor/golang.org/x/net/http2/go17_not18.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
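The trace helpers above are the transport's side of net/http/httptrace; a client opts in by attaching hooks to the request context. A small sketch of that wiring against a throwaway test server (illustrative only; any of the other ClientTrace hooks can be filled in the same way):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    	"net/http/httptrace"
    )

    func main() {
    	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprint(w, "ok")
    	}))
    	defer srv.Close()

    	req, _ := http.NewRequest("GET", srv.URL, nil)
    	trace := &httptrace.ClientTrace{
    		GotConn: func(ci httptrace.GotConnInfo) {
    			fmt.Println("got conn, reused:", ci.Reused)
    		},
    		GotFirstResponseByte: func() { fmt.Println("first response byte") },
    	}
    	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

    	res, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	res.Body.Close()
    }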
- -// +build go1.7,!go1.8 - -package http2 - -import "crypto/tls" - -// temporary copy of Go 1.7's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go deleted file mode 100644 index 633202c39..000000000 --- a/vendor/golang.org/x/net/http2/go18.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "crypto/tls" - "io" - "net/http" -) - -func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } - -var _ http.Pusher = (*responseWriter)(nil) - -// Push implements http.Pusher. -func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - internalOpts := pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func configureServer18(h1 *http.Server, h2 *Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func reqBodyIsNoBody(body io.ReadCloser) bool { - return body == http.NoBody -} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go deleted file mode 100644 index 9933c9f8c..000000000 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Defensive debug-only utility to track that functions run on the -// goroutine that they're supposed to. 
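The idea below: capture the owning goroutine's ID once, then have code that must stay on that goroutine call a check that panics on mismatch, all gated behind an environment variable so it costs nothing in production. A compressed standalone sketch of the same pattern (goroutineID here is an illustrative stand-in for curGoroutineID, without the pooling or error handling):

    package main

    import (
    	"bytes"
    	"fmt"
    	"runtime"
    	"strconv"
    )

    // goroutineID parses the current goroutine's ID out of runtime.Stack,
    // the same trick curGoroutineID uses below.
    func goroutineID() uint64 {
    	b := make([]byte, 64)
    	b = b[:runtime.Stack(b, false)]
    	b = bytes.TrimPrefix(b, []byte("goroutine "))
    	b = b[:bytes.IndexByte(b, ' ')]
    	n, _ := strconv.ParseUint(string(b), 10, 64)
    	return n
    }

    func main() {
    	owner := goroutineID()
    	check := func() {
    		if goroutineID() != owner {
    			panic("running on the wrong goroutine")
    		}
    	}
    	check() // same goroutine: fine

    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		defer func() { fmt.Println("caught:", recover() != nil) }()
    		check() // different goroutine: panics
    	}()
    	<-done
    }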
-
-package http2
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"runtime"
-	"strconv"
-	"sync"
-)
-
-var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
-
-type goroutineLock uint64
-
-func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
-		return 0
-	}
-	return goroutineLock(curGoroutineID())
-}
-
-func (g goroutineLock) check() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() != uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() == uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-var goroutineSpace = []byte("goroutine ")
-
-func curGoroutineID() uint64 {
-	bp := littleBuf.Get().(*[]byte)
-	defer littleBuf.Put(bp)
-	b := *bp
-	b = b[:runtime.Stack(b, false)]
-	// Parse the 4707 out of "goroutine 4707 ["
-	b = bytes.TrimPrefix(b, goroutineSpace)
-	i := bytes.IndexByte(b, ' ')
-	if i < 0 {
-		panic(fmt.Sprintf("No space found in %q", b))
-	}
-	b = b[:i]
-	n, err := parseUintBytes(b, 10, 64)
-	if err != nil {
-		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
-	}
-	return n
-}
-
-var littleBuf = sync.Pool{
-	New: func() interface{} {
-		buf := make([]byte, 64)
-		return &buf
-	},
-}
-
-// parseUintBytes is like strconv.ParseUint, but using a []byte.
-func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
-	var cutoff, maxVal uint64
-
-	if bitSize == 0 {
-		bitSize = int(strconv.IntSize)
-	}
-
-	s0 := s
-	switch {
-	case len(s) < 1:
-		err = strconv.ErrSyntax
-		goto Error
-
-	case 2 <= base && base <= 36:
-		// valid base; nothing to do
-
-	case base == 0:
-		// Look for octal, hex prefix.
-		switch {
-		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
-			base = 16
-			s = s[2:]
-			if len(s) < 1 {
-				err = strconv.ErrSyntax
-				goto Error
-			}
-		case s[0] == '0':
-			base = 8
-		default:
-			base = 10
-		}
-
-	default:
-		err = errors.New("invalid base " + strconv.Itoa(base))
-		goto Error
-	}
-
-	n = 0
-	cutoff = cutoff64(base)
-	maxVal = 1<<uint(bitSize) - 1
-
-	for i := 0; i < len(s); i++ {
-		var v byte
-		d := s[i]
-		switch {
-		case '0' <= d && d <= '9':
-			v = d - '0'
-		case 'a' <= d && d <= 'z':
-			v = d - 'a' + 10
-		case 'A' <= d && d <= 'Z':
-			v = d - 'A' + 10
-		default:
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-		if int(v) >= base {
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-
-		if n >= cutoff {
-			// n*base overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n *= uint64(base)
-
-		n1 := n + uint64(v)
-		if n1 < n || n1 > maxVal {
-			// n+v overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n = n1
-	}
-
-	return n, nil
-
-Error:
-	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
-}
-
-// Return the first number n such that n*base >= 1<<64.
-func cutoff64(base int) uint64 {
-	if base < 2 {
-		return 0
-	}
-	return (1<<64-1)/uint64(base) + 1
-}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
deleted file mode 100644
index c2805f6ac..000000000
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index f9bb03398..000000000 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. 
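Driving the encoder is just NewEncoder plus repeated WriteField calls; everything else (static/dynamic table lookups, the Huffman-vs-literal choice) happens inside. A tiny usage sketch against the exported API:

    package main

    import (
    	"bytes"
    	"fmt"

    	"golang.org/x/net/http2/hpack"
    )

    func main() {
    	var buf bytes.Buffer
    	enc := hpack.NewEncoder(&buf)

    	// ":method: GET" is entry 2 of the HPACK static table, so it comes
    	// out as a single indexed byte, 0x82.
    	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
    		panic(err)
    	}
    	fmt.Printf("% x\n", buf.Bytes()) // 82
    }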
-func (e *Encoder) WriteField(f HeaderField) error { - e.buf = e.buf[:0] - - if e.tableSizeUpdate { - e.tableSizeUpdate = false - if e.minSize < e.dynTab.maxSize { - e.buf = appendTableSize(e.buf, e.minSize) - } - e.minSize = uint32Max - e.buf = appendTableSize(e.buf, e.dynTab.maxSize) - } - - idx, nameValueMatch := e.searchTable(f) - if nameValueMatch { - e.buf = appendIndexed(e.buf, idx) - } else { - indexing := e.shouldIndex(f) - if indexing { - e.dynTab.add(f) - } - - if idx == 0 { - e.buf = appendNewName(e.buf, f, indexing) - } else { - e.buf = appendIndexedName(e.buf, f, idx, indexing) - } - } - n, err := e.w.Write(e.buf) - if err == nil && n != len(e.buf) { - err = io.ErrShortWrite - } - return err -} - -// searchTable searches f in both stable and dynamic header tables. -// The static header table is searched first. Only when there is no -// exact match for both name and value, the dynamic header table is -// then searched. If there is no match, i is 0. If both name and value -// match, i is the matched index and nameValueMatch becomes true. If -// only name matches, i points to that index and nameValueMatch -// becomes false. -func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return - } - - j, nameValueMatch := e.dynTab.search(f) - if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) - } - return -} - -// SetMaxDynamicTableSize changes the dynamic header table size to v. -// The actual size is bounded by the value passed to -// SetMaxDynamicTableSizeLimit. -func (e *Encoder) SetMaxDynamicTableSize(v uint32) { - if v > e.maxSizeLimit { - v = e.maxSizeLimit - } - if v < e.minSize { - e.minSize = v - } - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) -} - -// SetMaxDynamicTableSizeLimit changes the maximum value that can be -// specified in SetMaxDynamicTableSize to v. By default, it is set to -// 4096, which is the same size of the default dynamic header table -// size described in HPACK specification. If the current maximum -// dynamic header table size is strictly greater than v, "Header Table -// Size Update" will be done in the next WriteField call and the -// maximum dynamic header table size is truncated to v. -func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { - e.maxSizeLimit = v - if e.dynTab.maxSize > v { - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) - } -} - -// shouldIndex reports whether f should be indexed. -func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.Size() <= e.dynTab.maxSize -} - -// appendIndexed appends index i, as encoded in "Indexed Header Field" -// representation, to dst and returns the extended buffer. -func appendIndexed(dst []byte, i uint64) []byte { - first := len(dst) - dst = appendVarInt(dst, 7, i) - dst[first] |= 0x80 - return dst -} - -// appendNewName appends f, as encoded in one of "Literal Header field -// - New Name" representation variants, to dst and returns the -// extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" -// representation is used. 
-func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. -func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index 135b9f62c..000000000 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. -// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. 
-type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid pseudo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7541 section 4.1. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. - saveBuf bytes.Buffer -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - } - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. -var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. 
-// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // ents is the FIFO described at - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - -func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff (front of the slice) -func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] - } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. 
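search below walks the same slice that add and evict maintain, newest entry first, and the returned index is the distance from the newest entry (1-based). The size bookkeeping it all rests on is simple: each entry costs its name length plus its value length plus 32 octets of overhead, per RFC 7541 section 4.1. A quick arithmetic sketch (entrySize is an illustrative mirror of HeaderField.Size):

    package main

    import "fmt"

    // entrySize mirrors HeaderField.Size: name + value + 32 octets overhead.
    func entrySize(name, value string) uint32 {
    	return uint32(len(name) + len(value) + 32)
    }

    func main() {
    	fmt.Println(entrySize("cache-control", "no-cache")) // 53
    	// With maxSize = 100, adding a second 53-octet entry (total 106)
    	// would push the table over its limit, so the oldest entry is
    	// dropped from the front, exactly as evict above does.
    }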
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return -} - -func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { - return - } - if i > uint64(d.maxTableIndex()) { - return - } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
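As a worked counterpart to readVarInt below, this sketch encodes an integer with an n-bit prefix per RFC 7541 section 5.1. appendVarInt here is a hypothetical standalone helper written for illustration; it is not an exported API of this package.

package main

import "fmt"

// appendVarInt encodes i into dst with an n-bit prefix, the inverse
// of what readVarInt decodes (RFC 7541 section 5.1).
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i)) // fits entirely in the prefix
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f))) // continuation bytes, 7 bits each
	}
	return append(dst, byte(i))
}

func main() {
	fmt.Printf("%x\n", appendVarInt(nil, 5, 10))   // 0a: fits in the 5-bit prefix
	fmt.Printf("%x\n", appendVarInt(nil, 5, 1337)) // 1f9a0a: the RFC 7541 C.1.2 example
}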
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
- if n < 1 || n > 8 {
- panic("bad n")
- }
- if len(p) == 0 {
- return 0, p, errNeedMore
- }
- i = uint64(p[0])
- if n < 8 {
- i &= (1 << uint64(n)) - 1
- }
- if i < (1<<uint64(n))-1 {
- return i, p[1:], nil
- }
-
- origP := p
- p = p[1:]
- var m uint64
- for len(p) > 0 {
- b := p[0]
- p = p[1:]
- i += uint64(b&127) << m
- if b&128 == 0 {
- return i, p, nil
- }
- m += 7
- if m >= 63 { // TODO: proper overflow check. making this up.
- return 0, origP, errVarintOverflow
- }
- }
- return 0, origP, errNeedMore
-}
-
-// readString decodes an hpack string from p.
-//
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
- if len(p) == 0 {
- return "", p, errNeedMore
- }
- isHuff := p[0]&128 != 0
- strLen, p, err := readVarInt(7, p)
- if err != nil {
- return "", p, err
- }
- if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
- return "", nil, ErrStringLength
- }
- if uint64(len(p)) < strLen {
- return "", p, errNeedMore
- }
- if !isHuff {
- if wantStr {
- s = string(p[:strLen])
- }
- return s, p[strLen:], nil
- }
-
- if wantStr {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset() // don't trust others
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
- buf.Reset()
- return "", nil, err
- }
- s = buf.String()
- buf.Reset() // be nice to GC
- }
- return s, p[strLen:], nil
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
deleted file mode 100644
index 8850e3946..000000000
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
-)
-
-var bufPool = sync.Pool{
- New: func() interface{} { return new(bytes.Buffer) },
-}
-
-// HuffmanDecode decodes the string in v and writes the expanded
-// result to w, returning the number of bytes written to w and the
-// Write call's return value. At most one Write call is made.
-func HuffmanDecode(w io.Writer, v []byte) (int, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return 0, err
- }
- return w.Write(buf.Bytes())
-}
-
-// HuffmanDecodeToString decodes the string in v.
-func HuffmanDecodeToString(v []byte) (string, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- defer bufPool.Put(buf)
- if err := huffmanDecode(buf, 0, v); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-// ErrInvalidHuffman is returned for errors found decoding
-// Huffman-encoded strings.
-var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
-
-// huffmanDecode decodes v to buf.
-// If maxLen is greater than 0, attempts to write more to buf than
-// maxLen bytes will return ErrStringLength.
-func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
- n := rootHuffmanNode
- // cur is the bit buffer that has not been fed into n.
- // cbits is the number of low order bits in cur that are valid.
- // sbits is the number of bits of the symbol prefix being decoded.
- cur, cbits, sbits := uint(0), uint8(0), uint8(0)
- for _, b := range v {
- cur = cur<<8 | uint(b)
- cbits += 8
- sbits += 8
- for cbits >= 8 {
- idx := byte(cur >> (cbits - 8))
- n = n.children[idx]
- if n == nil {
- return ErrInvalidHuffman
- }
- if n.children == nil {
- if maxLen != 0 && buf.Len() == maxLen {
- return ErrStringLength
- }
- buf.WriteByte(n.sym)
- cbits -= n.codeLen
- n = rootHuffmanNode
- sbits = cbits
- } else {
- cbits -= 8
- }
- }
- }
- for cbits > 0 {
- n = n.children[byte(cur<<(8-cbits))]
- if n == nil {
- return ErrInvalidHuffman
- }
- if n.children != nil || n.codeLen > cbits {
- break
- }
- if maxLen != 0 && buf.Len() == maxLen {
- return ErrStringLength
- }
- buf.WriteByte(n.sym)
- cbits -= n.codeLen
- n = rootHuffmanNode
- sbits = cbits
- }
- if sbits > 7 {
- // Either there was an incomplete symbol, or overlong padding.
- // Both are decoding errors per RFC 7541 section 5.2.
- return ErrInvalidHuffman
- }
- if mask := uint(1<<cbits - 1); cur&mask != mask {
- // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
- return ErrInvalidHuffman
- }
-
- return nil
-}
-
-type node struct {
- // children is non-nil for internal nodes
- children []*node
-
- // The following are only valid if children is nil:
- codeLen uint8 // number of bits that led to the output of sym
- sym byte // output symbol
-}
-
-func newInternalNode() *node {
- return &node{children: make([]*node, 256)}
-}
-
-var rootHuffmanNode = newInternalNode()
-
-func init() {
- if len(huffmanCodes) != 256 {
- panic("unexpected size")
- }
- for i, code := range huffmanCodes {
- addDecoderNode(byte(i), code, huffmanCodeLen[i])
- }
-}
-
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := rootHuffmanNode
- for codeLen > 8 {
- codeLen -= 8
- i := uint8(code >> codeLen)
- if cur.children[i] == nil {
- cur.children[i] = newInternalNode()
- }
- cur = cur.children[i]
- }
- shift := 8 - codeLen
- start, end := int(uint8(code<<shift)), int(1<<shift)
- for i := start; i < start+end; i++ {
- cur.children[i] = &node{sym: sym, codeLen: codeLen}
- }
-}
-
-// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
-// and returns the extended buffer.
-func AppendHuffmanString(dst []byte, s string) []byte {
- rembits := uint8(8)
-
- for i := 0; i < len(s); i++ {
- if rembits == 8 {
- dst = append(dst, 0)
- }
- dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
- }
-
- if rembits < 8 {
- // special EOS symbol
- code := uint32(0x3fffffff)
- nbits := uint8(30)
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
- }
-
- return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is round up to byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
- n := uint64(0)
- for i := 0; i < len(s); i++ {
- n += uint64(huffmanCodeLen[s[i]])
- }
- return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst is given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
- code := huffmanCodes[c]
- nbits := huffmanCodeLen[c]
-
- for {
- if rembits > nbits {
- t := uint8(code << (rembits - nbits))
- dst[len(dst)-1] |= t
- rembits -= nbits
- break
- }
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
-
- nbits -= rembits
- rembits = 8
-
- if nbits == 0 {
- break
- }
-
- dst = append(dst, 0)
- }
-
- return dst, rembits
-}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index b9283a023..000000000
--- a/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
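The exported Huffman helpers above can be exercised directly. This small example round-trips the RFC 7541 appendix C.4 sample string and shows that HuffmanEncodeLength predicts the encoded size without doing the encoding.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"
	enc := hpack.AppendHuffmanString(nil, s)
	// RFC 7541 C.4.1 encodes this 15-byte string in 12 bytes.
	fmt.Printf("%d -> %d bytes (%x)\n", len(s), len(enc), enc)
	fmt.Println(hpack.HuffmanEncodeLength(s)) // 12, computed from the code-length table alone

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec) // www.example.com
}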
- -package hpack - -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 
- 0x3fffda,
- 0x1fffdd,
- 0xfffe9,
- 0x3fffdb,
- 0x3fffdc,
- 0x7fffe8,
- 0x7fffe9,
- 0x1fffde,
- 0x7fffea,
- 0x3fffdd,
- 0x3fffde,
- 0xfffff0,
- 0x1fffdf,
- 0x3fffdf,
- 0x7fffeb,
- 0x7fffec,
- 0x1fffe0,
- 0x1fffe1,
- 0x3fffe0,
- 0x1fffe2,
- 0x7fffed,
- 0x3fffe1,
- 0x7fffee,
- 0x7fffef,
- 0xfffea,
- 0x3fffe2,
- 0x3fffe3,
- 0x3fffe4,
- 0x7ffff0,
- 0x3fffe5,
- 0x3fffe6,
- 0x7ffff1,
- 0x3ffffe0,
- 0x3ffffe1,
- 0xfffeb,
- 0x7fff1,
- 0x3fffe7,
- 0x7ffff2,
- 0x3fffe8,
- 0x1ffffec,
- 0x3ffffe2,
- 0x3ffffe3,
- 0x3ffffe4,
- 0x7ffffde,
- 0x7ffffdf,
- 0x3ffffe5,
- 0xfffff1,
- 0x1ffffed,
- 0x7fff2,
- 0x1fffe3,
- 0x3ffffe6,
- 0x7ffffe0,
- 0x7ffffe1,
- 0x3ffffe7,
- 0x7ffffe2,
- 0xfffff2,
- 0x1fffe4,
- 0x1fffe5,
- 0x3ffffe8,
- 0x3ffffe9,
- 0xffffffd,
- 0x7ffffe3,
- 0x7ffffe4,
- 0x7ffffe5,
- 0xfffec,
- 0xfffff3,
- 0xfffed,
- 0x1fffe6,
- 0x3fffe9,
- 0x1fffe7,
- 0x1fffe8,
- 0x7ffff3,
- 0x3fffea,
- 0x3fffeb,
- 0x1ffffee,
- 0x1ffffef,
- 0xfffff4,
- 0xfffff5,
- 0x3ffffea,
- 0x7ffff4,
- 0x3ffffeb,
- 0x7ffffe6,
- 0x3ffffec,
- 0x3ffffed,
- 0x7ffffe7,
- 0x7ffffe8,
- 0x7ffffe9,
- 0x7ffffea,
- 0x7ffffeb,
- 0xffffffe,
- 0x7ffffec,
- 0x7ffffed,
- 0x7ffffee,
- 0x7ffffef,
- 0x7fffff0,
- 0x3ffffee,
-}
-
-var huffmanCodeLen = [256]uint8{
- 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
- 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
- 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
- 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
- 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
- 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
- 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
- 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
- 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
- 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
- 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
- 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
- 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
- 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
-}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
deleted file mode 100644
index b6b0f9ad1..000000000
--- a/vendor/golang.org/x/net/http2/http2.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package http2 implements the HTTP/2 protocol.
-//
-// This package is low-level and intended to be used directly by very
-// few people. Most users will use it indirectly through the automatic
-// use by the net/http package (from Go 1.6 and later).
-// For use in earlier Go versions see ConfigureServer. (Transport support
-// requires Go 1.6 or later)
-//
-// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
-// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/lex/httplex" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -// HTTP/2 stream states. -// -// See http://tools.ietf.org/html/rfc7540#section-5.1. -// -// For simplicity, the server code merges "reserved (local)" into -// "half-closed (remote)". This is one less state transition to track. -// The only downside is that we send PUSH_PROMISEs slightly less -// liberally than allowable. More discussion here: -// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html -// -// "reserved (remote)" is omitted since the client code does not -// support server push. -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
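Setting and the Valid method that follows are exported, so the SETTINGS bounds checks can be exercised directly; a small sketch:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ok := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20}
	bad := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1024} // below the 16384 floor

	fmt.Println(ok, ok.Valid())   // [MAX_FRAME_SIZE = 1048576] <nil>
	fmt.Println(bad, bad.Valid()) // [MAX_FRAME_SIZE = 1024] connection error: PROTOCOL_ERROR
}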
-func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. -// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httplex.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - -func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. 
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -// bufWriterPoolBufferSize is the size of bufio.Writer's -// buffers created using bufWriterPool. -// -// TODO: pick a less arbitrary value? this is a bit under -// (3 x typical 1500 byte MTU) at least. Other than that, -// not much thought went into it. -const bufWriterPoolBufferSize = 4 << 10 - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) - }, -} - -func (w *bufferedWriter) Available() int { - if w.bw == nil { - return bufWriterPoolBufferSize - } - return w.bw.Available() -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owns, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// *) a non-empty string starting with '/', but not with with "//", -// *) the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. 
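The lazy sync.Pool pattern behind bufferedWriter above generalizes beyond http2. Here is a self-contained sketch of the same idea, under the assumption that writes come in bursts: borrow a pooled bufio.Writer only while there is data to write, then flush and return it so idle connections hold no buffer.

package main

import (
	"bufio"
	"fmt"
	"os"
	"sync"
)

var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) },
}

// writeBuffered borrows a pooled bufio.Writer for one burst of writes,
// then flushes and returns it, mirroring bufferedWriter's Flush path.
func writeBuffered(dst *os.File, lines []string) error {
	bw := writerPool.Get().(*bufio.Writer)
	bw.Reset(dst)
	for _, l := range lines {
		if _, err := bw.WriteString(l + "\n"); err != nil {
			return err
		}
	}
	err := bw.Flush()
	bw.Reset(nil) // drop the reference to dst before pooling
	writerPool.Put(bw)
	return err
}

func main() {
	if err := writeBuffered(os.Stdout, []string{"hello", "world"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}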
-// See golang.org/issue/16847 -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index efd2e1282..000000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a79..000000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. 
-} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index efbf83c32..000000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index 53b7a1daf..000000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer - err error // read error once empty. non-nil means closed. 
- breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 3c6b90ccd..000000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2753 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
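To make the pipe semantics above concrete, this stripped-down sketch keeps only what the deleted type is built on: one mutex, a lazily wired sync.Cond, and a sticky error that readers observe once the buffer drains. It is illustrative only; the vendored pipe additionally supports BreakWithError and a done channel.

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// miniPipe is an illustrative reduction of the pipe type above.
type miniPipe struct {
	mu  sync.Mutex
	c   sync.Cond
	b   bytes.Buffer
	err error
}

func (p *miniPipe) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	for {
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil { // only after all buffered data has been read
			return 0, p.err
		}
		p.c.Wait()
	}
}

func (p *miniPipe) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	defer p.c.Signal() // wake a blocked reader
	return p.b.Write(d)
}

func (p *miniPipe) CloseWithError(err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	p.err = err
	p.c.Signal()
}

func main() {
	p := new(miniPipe)
	go func() {
		p.Write([]byte("data"))
		p.CloseWithError(io.EOF)
	}()
	buf := make([]byte, 16)
	for {
		n, err := p.Read(buf)
		fmt.Printf("%q %v\n", buf[:n], err) // "data" <nil>, then "" EOF
		if err != nil {
			return
		}
	}
}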
- -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool - - // IdleTimeout specifies how long until idle clients should be - // closed with a GOAWAY frame. 
- // PING frames are not considered
- // activity for the purposes of IdleTimeout.
- IdleTimeout time.Duration
-
- // NewWriteScheduler constructs a write scheduler for a connection.
- // If nil, a default scheduler is chosen.
- NewWriteScheduler func() WriteScheduler
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-// ConfigureServer adds HTTP/2 support to a net/http Server.
-//
-// The configuration conf may be nil.
-//
-// ConfigureServer must be called before s begins serving.
-func ConfigureServer(s *http.Server, conf *Server) error {
- if s == nil {
- panic("nil *http.Server")
- }
- if conf == nil {
- conf = new(Server)
- }
- if err := configureServer18(s, conf); err != nil {
- return err
- }
-
- if s.TLSConfig == nil {
- s.TLSConfig = new(tls.Config)
- } else if s.TLSConfig.CipherSuites != nil {
- // If they already provided a CipherSuite list, return
- // an error if it has a bad order or is missing
- // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
- const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- haveRequired := false
- sawBad := false
- for i, cs := range s.TLSConfig.CipherSuites {
- if cs == requiredCipher {
- haveRequired = true
- }
- if isBadCipher(cs) {
- sawBad = true
- } else if sawBad {
- return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
- }
- }
- if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
- }
- }
-
- // Note: not setting MinVersion to tls.VersionTLS12,
- // as we don't want to interfere with HTTP/1.1 traffic
- // on the user's server. We enforce TLS 1.2 later once
- // we accept a connection. Ideally this should be done
- // during next-proto selection, but using TLS <1.2 with
- // HTTP/2 is still the client's bug.
-
- s.TLSConfig.PreferServerCipherSuites = true
-
- haveNPN := false
- for _, p := range s.TLSConfig.NextProtos {
- if p == NextProtoTLS {
- haveNPN = true
- break
- }
- }
- if !haveNPN {
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
- }
-
- if s.TLSNextProto == nil {
- s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
- }
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
- if testHookOnConn != nil {
- testHookOnConn()
- }
- conf.ServeConn(c, &ServeConnOpts{
- Handler: h,
- BaseConfig: hs,
- })
- }
- s.TLSNextProto[NextProtoTLS] = protoHandler
- return nil
-}
-
-// ServeConnOpts are options for the Server.ServeConn method.
-type ServeConnOpts struct {
- // BaseConfig optionally sets the base configuration
- // for values. If nil, defaults are used.
- BaseConfig *http.Server
-
- // Handler specifies which handler to use for processing
- // requests. If nil, BaseConfig.Handler is used. If BaseConfig
- // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
- Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - wantStartPushCh: make(chan startPushRequest, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - // The net/http package sets the write deadline from the - // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here so that it is not applied to additional - // streams opened on this connection. - // TODO: implement WriteTimeout fully. See Issue 18437. - if sc.hs.WriteTimeout != 0 { - sc.conn.SetWriteDeadline(time.Time{}) - } - - if s.NewWriteScheduler != nil { - sc.writeSched = s.NewWriteScheduler() - } else { - sc.writeSched = NewRandomWriteScheduler() - } - - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. 
- // An endpoint MUST
- // immediately terminate an HTTP/2 connection that
- // does not meet the TLS requirements described in
- // this section with a connection error (Section
- // 5.4.1) of type INADEQUATE_SECURITY.
- if sc.tlsState.Version < tls.VersionTLS12 {
- sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
- return
- }
-
- if sc.tlsState.ServerName == "" {
- // Client must use SNI, but we don't enforce that anymore,
- // since it was causing problems when connecting to bare IP
- // addresses during development.
- //
- // TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify the the ServerName matches the :authority?
- // But that precludes proxy situations, perhaps.
- //
- // So for now, do nothing here again.
- }
-
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
- // "Endpoints MAY choose to generate a connection error
- // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
- // the prohibited cipher suites are negotiated."
- //
- // We choose that. In my opinion, the spec is weak
- // here. It also says both parties must support at least
- // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
- // excuses here. If we really must, we could allow an
- // "AllowInsecureWeakCiphers" option on the server later.
- // Let's see how it plays out first.
- sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
- return
- }
- }
-
- if hook := testHookGetServerConn; hook != nil {
- hook(sc)
- }
- sc.serve()
-}
-
-func (sc *serverConn) rejectConn(err ErrCode, debug string) {
- sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
- // ignoring errors. hanging up anyway.
- sc.framer.WriteGoAway(0, err, []byte(debug))
- sc.bw.Flush()
- sc.conn.Close()
-}
-
-type serverConn struct {
- // Immutable:
- srv *Server
- hs *http.Server
- conn net.Conn
- bw *bufferedWriter // writing to conn
- handler http.Handler
- baseCtx contextContext
- framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
- wantStartPushCh chan startPushRequest // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
- remoteAddrStr string
- writeSched WriteScheduler
-
- // Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - idleTimerCh <-chan time.Time // nil if unused - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -func (sc *serverConn) curOpenStreams() uint32 { - sc.serveG.check() - return sc.curClientStreams + sc.curPushedStreams -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. 
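The padding arithmetic in maxHeaderListSize above is worth seeing with numbers: http2 counts 32 octets of per-field overhead (RFC 7540 section 6.5.2), so the net/http byte limit is padded assuming roughly ten header fields. A sketch, with 1 << 20 standing in for http.DefaultMaxHeaderBytes:

package main

import "fmt"

// advertisedMaxHeaderListSize mirrors the padding done by maxHeaderListSize:
// the configured byte budget plus 32 overhead octets for each of ~10 headers.
func advertisedMaxHeaderListSize(maxHeaderBytes int) uint32 {
	if maxHeaderBytes <= 0 {
		maxHeaderBytes = 1 << 20 // stand-in for http.DefaultMaxHeaderBytes
	}
	const perFieldOverhead = 32
	const typicalHeaders = 10
	return uint32(maxHeaderBytes + typicalHeaders*perFieldOverhead)
}

func main() {
	fmt.Println(advertisedMaxHeaderListSize(0)) // 1048896
}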
-type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte // if non-nil, body pipe buffer to return later at EOF - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://tools.ietf.org/html/rfc7540#section-5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID%2 == 1 { - if streamID <= sc.maxClientStreamID { - return stateClosed, nil - } - } else { - if streamID <= sc.maxPushPromiseID { - return stateClosed, nil - } - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. -func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. 
- str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wr FrameWriteRequest // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { - err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
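readFrames above reads exactly one frame at a time: it hands the frame to the serve goroutine, then blocks on a tiny "gate" channel until the consumer calls readMore. A minimal stand-alone sketch of that handoff pattern (names are illustrative, not the package's own API):

    package main

    import "fmt"

    type gate chan struct{}

    func (g gate) Done() { g <- struct{}{} }

    func main() {
        frames := make(chan int)
        g := make(gate)
        go func() { // producer, like readFrames
            for f := 1; f <= 3; f++ {
                frames <- f // hand one frame to the consumer
                <-g         // block until the consumer is done with it
            }
            close(frames)
        }()
        for f := range frames { // consumer, like the serve loop
            fmt.Println("processing frame", f)
            g.Done() // the readMore equivalent: frame memory may now be reused
        }
    }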
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings - }, - }) - sc.unackedSettings++ - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) - defer sc.idleTimer.Stop() - sc.idleTimerCh = sc.idleTimer.C - } - - var gracefulShutdownCh <-chan struct{} - if sc.hs != nil { - gracefulShutdownCh = h1ServerShutdownChan(sc.hs) - } - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.NewTimer(firstSettingsTimeout) - loopNum := 0 - for { - loopNum++ - select { - case wr := <-sc.wantWriteFrameCh: - sc.writeFrame(wr) - case spr := <-sc.wantStartPushCh: - sc.startPush(spr) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer.C != nil { - settingsTimer.Stop() - settingsTimer.C = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-gracefulShutdownCh: - gracefulShutdownCh = nil - sc.startGracefulShutdown() - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case <-sc.idleTimerCh: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case fn := <-sc.testHookCh: - fn(loopNum) - } - - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return - } - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? 
- defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wr: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wr FrameWriteRequest) { - sc.serveG.check() - - // If true, wr will not be written and wr.done will not be signaled. - var ignoreWrite bool - - // We are not allowed to write frames on closed streams. RFC 7540 Section - // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on - // a closed stream." Our server never sends PRIORITY, so that exception - // does not apply. - // - // The serverConn might close an open stream while the stream's handler - // is still running. For example, the server might close a stream when it - // receives bad data from the client. If this happens, the handler might - // attempt to write a frame after the stream has been closed (since the - // handler hasn't yet been notified of the close). In this case, we simply - // ignore the frame. 
The handler will notice that the stream is closed when - // it waits for the frame to be written. - // - // As an exception to this rule, we allow sending RST_STREAM after close. - // This allows us to immediately reject new streams without tracking any - // state for those streams (except for the queued RST_STREAM frame). This - // may result in duplicate RST_STREAMs in some cases, but the client should - // ignore those. - if wr.StreamID() != 0 { - _, isReset := wr.write.(StreamError) - if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { - ignoreWrite = true - } - } - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wr.write.(type) { - case *writeResHeaders: - wr.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wr.stream.wroteHeaders { - // We do not need to notify wr.done because this frame is - // never written with wr.done != nil. - if wr.done != nil { - panic("wr.done != nil for write100ContinueHeadersFrame") - } - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.Push(wr) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wr (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wr. -func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wr.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - switch wr.write.(type) { - case StreamError, handlerPanicRST, writeWindowUpdate: - // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE - // in this state. (We never send PRIORITY from the server, so that is not checked.) - default: - panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) - } - case stateClosed: - panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) - } - } - if wpp, ok := wr.write.(*writePushPromise); ok { - var err error - wpp.promisedID, err = wpp.allocatePromisedID() - if err != nil { - sc.writingFrameAsync = false - wr.replyToWriter(err) - return - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - if wr.write.staysWithinBuffer(sc.bw.Available()) { - sc.writingFrameAsync = false - err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) - } else { - sc.writingFrameAsync = true - go sc.writeFrameAsync(wr) - } -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. 
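writeDataFromHandler above recycles its one-slot error channels through a sync.Pool, and a channel may only be returned to the pool once its buffered send is known to have completed. A reduced sketch of that pattern (doWork is a hypothetical helper, not from this file):

    package main

    import (
        "fmt"
        "sync"
    )

    var errChanPool = sync.Pool{
        New: func() interface{} { return make(chan error, 1) },
    }

    func doWork(work func() error) error {
        ch := errChanPool.Get().(chan error)
        go func() { ch <- work() }()
        err := <-ch
        // Safe to recycle: the buffered send above has completed.
        errChanPool.Put(ch)
        return err
    }

    func main() {
        fmt.Println(doWork(func() error { return nil })) // <nil>
    }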
-func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - sc.writingFrameAsync = false - - wr := res.wr - - if writeEndsStream(wr.write) { - st := wr.stream - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for closing a ResponseWriter while still - // reading data (see possible TODO at top of - // this file), we go into closed state here - // anyway, after telling the peer we're - // hanging up on them. We'll transition to - // stateClosed after the RST_STREAM frame is - // written. - st.state = stateHalfClosedLocal - sc.resetStream(streamError(st.id, ErrCodeCancel)) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } else { - switch v := wr.write.(type) { - case StreamError: - // st may be unknown if the RST_STREAM was generated to reject bad input. - if st, ok := sc.streams[v.StreamID]; ok { - sc.closeStream(st, v) - } - case handlerPanicRST: - sc.closeStream(wr.stream, errHandlerPanicked) - } - } - - // Reply (if requested) to unblock the ServeHTTP goroutine. - wr.replyToWriter(res.err) - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. -func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame || sc.inFrameScheduleLoop { - return - } - sc.inFrameScheduleLoop = true - for !sc.writingFrameAsync { - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(FrameWriteRequest{ - write: &writeGoAway{ - maxStreamID: sc.maxClientStreamID, - code: sc.goAwayCode, - }, - }) - continue - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) - continue - } - if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { - if wr, ok := sc.writeSched.Pop(); ok { - sc.startFrameWrite(wr) - continue - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - continue - } - break - } - sc.inFrameScheduleLoop = false -} - -// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the -// client we're gracefully shutting down. The connection isn't closed -// until all current streams are done. 
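startGracefulShutdown below is what ultimately runs when the HTTP/1 server shuts down. With the http2 code bundled into net/http (Go 1.8+), the path is typically exercised like this (sketch; the cert/key paths are hypothetical):

    package main

    import (
        "context"
        "log"
        "net/http"
        "time"
    )

    func main() {
        srv := &http.Server{Addr: ":8443"} // HTTP/2 is negotiated automatically over TLS
        go srv.ListenAndServeTLS("cert.pem", "key.pem") // hypothetical certificate files

        // Shutdown closes the channel watched by the serve loop, which then
        // sends GOAWAY(NO_ERROR) and closes the connection once all current
        // streams are done.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        if err := srv.Shutdown(ctx); err != nil {
            log.Fatal(err)
        }
    }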
-func (sc *serverConn) startGracefulShutdown() { - sc.goAwayIn(ErrCodeNo, 0) -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { - sc.serveG.check() - if sc.inGoAway { - return - } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(FrameWriteRequest{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.resetQueued = true - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. - return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *GoAwayFrame: - return sc.processGoAway(f) - case *PushPromiseFrame: - // A client cannot push. 
Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - state, st := sc.state(f.StreamID) - if state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." - return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - if st.isPushed() { - sc.curPushedStreams-- - } else { - sc.curClientStreams-- - } - delete(sc.streams, st.id) - if len(sc.streams) == 0 { - sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer.Reset(sc.srv.IdleTimeout) - } - if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdown() - } - } - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. 
- // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.CloseStream(st.id) -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - if sc.inGoAway && sc.goAwayCode != ErrCodeNo { - return nil - } - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - state, st := sc.state(id) - if id == 0 || state == stateIdle { - // Section 5.1: "Receiving any frame other than HEADERS - // or PRIORITY on a stream in this state MUST be - // treated as a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR." 
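The delta rule in processSettingInitialWindowSize above is easiest to see with concrete numbers (stand-alone sketch, made-up values):

    package main

    import "fmt"

    func main() {
        old := int32(65535)     // previous SETTINGS_INITIAL_WINDOW_SIZE
        inFlight := int32(1000) // bytes we already wrote on some stream
        window := old - inFlight

        newVal := int32(131070) // peer's new setting
        growth := newVal - old  // may be negative if the peer shrank the window
        window += growth
        fmt.Println(window) // 130070: in-flight bytes stay accounted for
    }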
- return ConnectionError(ErrCodeProtocol) - } - if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - if st != nil && st.resetQueued { - // Already have a stream error in flight. Don't send another. - return nil - } - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -func (sc *serverConn) processGoAway(f *GoAwayFrame) error { - sc.serveG.check() - if f.ErrCode != ErrCodeNo { - sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } else { - sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) - } - sc.startGracefulShutdown() - // http://tools.ietf.org/html/rfc7540#section-6.8 - // We should not create any new streams, which means we should disable push. - sc.pushEnabled = false - return nil -} - -// isPushed reports whether the stream is server-initiated. -func (st *stream) isPushed() bool { - return st.id%2 == 0 -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. 
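The padding refund at the end of processData above, with small numbers (illustrative sketch):

    package main

    import "fmt"

    func main() {
        frameLength := int32(100) // DATA frame length: body bytes + pad-length byte + padding
        dataLen := int32(90)      // body bytes actually handed to the request pipe

        // The full frame length was taken from the flow-control windows, but
        // only the body bytes are refunded later as the handler reads them,
        // so the padding is returned immediately at both levels.
        if pad := frameLength - dataLen; pad > 0 {
            fmt.Println("refund now:", pad, "bytes; refunded on body reads:", dataLen)
        }
    }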
-func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://tools.ietf.org/html/rfc7540#section-5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - if st := sc.streams[f.StreamID]; st != nil { - if st.resetQueued { - // We're sending RST_STREAM to close the stream, so don't bother - // processing this frame. - return nil - } - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxClientStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxClientStreamID = id - - if sc.idleTimer != nil { - sc.idleTimer.Stop() - } - - // http://tools.ietf.org/html/rfc7540#section-5.1.2 - // [...] Endpoints MUST NOT exceed the limit set by their peer. An - // endpoint that receives a HEADERS frame that causes their - // advertised concurrent stream limit to be exceeded MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR - // or REFUSED_STREAM. - if sc.curClientStreams+1 > sc.advMaxStreams { - if sc.unackedSettings == 0 { - // They should know better. - return streamError(id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(id, ErrCodeRefusedStream) - } - - initialState := stateOpen - if f.StreamEnded() { - initialState = stateHalfClosedRemote - } - st := sc.newStream(id, 0, initialState) - - if f.HasPriority() { - if err := checkPriority(f.StreamID, f.Priority); err != nil { - return err - } - sc.writeSched.AdjustStream(st.id, f.Priority) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { - handler = new400Handler(err) - } - - // The net/http package sets the read deadline from the - // http.Server.ReadTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here after the request headers are read, - // similar to how the http1 server works. 
Here it's - // technically more like the http1 Server's ReadHeaderTimeout - // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. - return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func checkPriority(streamID uint32, p PriorityParam) error { - if streamID == p.StreamDep { - // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat - // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." - // Section 5.3.3 says that a stream can depend on one of its dependencies, - // so it's only self-dependencies that are forbidden. - return streamError(streamID, ErrCodeProtocol) - } - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - if sc.inGoAway { - return nil - } - if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { - return err - } - sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) - return nil -} - -func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { - sc.serveG.check() - if id == 0 { - panic("internal error: cannot create stream with id 0") - } - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st := &stream{ - sc: sc, - id: id, - state: state, - ctx: ctx, - cancelCtx: cancelCtx, - } - st.cw.Init() - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - - sc.streams[id] = st - sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) - if st.isPushed() { - sc.curPushedStreams++ - } else { - sc.curClientStreams++ - } - if sc.curOpenStreams() == 1 { - sc.setConnState(http.StateActive) - } - - return st -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - } - - isConnect := rp.method == "CONNECT" - if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." 
- // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - rp.header = make(http.Header) - for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") - } - - rw, req, err := sc.newWriterAndRequestNoBody(st, rp) - if err != nil { - return nil, nil, err - } - if bodyOpen { - st.reqBuf = getRequestBodyBuf() - req.Body.(*requestBody).pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - - if vv, ok := rp.header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } - return rw, req, nil -} - -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { - tlsState = sc.tlsState - } - - needsContinue := rp.header.Get("Expect") == "100-continue" - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, streamError(st.id, ErrCodeProtocol) - } - requestURI = rp.path - } - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - req := &http.Request{ - Method: rp.method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: rp.authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -var reqBodyCache = make(chan []byte, 8) - -func getRequestBodyBuf() []byte { - select { - case b := <-reqBodyCache: - return b - default: - return make([]byte, initialWindowSize) - } -} - -func putRequestBodyBuf(b []byte) { - select { - case reqBodyCache <- b: - default: - } -} - -// Run on its own goroutine. 
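newWriterAndRequestNoBody above folds repeated Cookie headers into a single "; "-joined value and turns the Trailer header into a set of predeclared trailer keys. A stand-alone illustration of both transformations (Checksum is an example field name):

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        h := http.Header{}
        h.Add("Cookie", "a=1")
        h.Add("Cookie", "b=2")
        if cookies := h["Cookie"]; len(cookies) > 1 {
            h.Set("Cookie", strings.Join(cookies, "; "))
        }
        fmt.Println(h.Get("Cookie")) // "a=1; b=2"

        // "Trailer: Checksum, Content-Length" predeclares only Checksum;
        // Content-Length is one of the keys rejected as bogus.
        trailer := make(http.Header)
        for _, key := range strings.Split("Checksum, Content-Length", ",") {
            key = http.CanonicalHeaderKey(strings.TrimSpace(key))
            switch key {
            case "Transfer-Encoding", "Trailer", "Content-Length":
                // ignore, mirroring the rules above
            default:
                trailer[key] = nil
            }
        }
        fmt.Println(trailer) // map[Checksum:[]]
    }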
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - sc.writeFrameFromHandler(FrameWriteRequest{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - // Same as net/http: - if shouldLogPanic(e) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - } - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(FrameWriteRequest{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(FrameWriteRequest{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { - sc.serveG.checkNotOn() // NOT on - if n > 0 { - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } - } - if err == io.EOF { - if buf := st.reqBuf; buf != nil { - st.reqBuf = nil // shouldn't matter; field unused by other - putRequestBodyBuf(buf) - } - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. - const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(FrameWriteRequest{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -// requestBody is the Handler's Request.Body type. -// Read and Close may be called concurrently. 
-type requestBody struct { - stream *stream - conn *serverConn - closed bool // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil && !b.closed { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil || b.sawEOF { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if err == io.EOF { - b.sawEOF = true - } - if b.conn == nil && inTests { - return - } - b.conn.noteBodyReadFromHandler(b.stream, n, err) - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. -func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. 
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. 
-// That worked for a while, until we found the first major
-// user of Trailers in the wild: gRPC (using them only over http2),
-// and gRPC libraries permit setting trailers mid-stream without
-// predeclaring them. So: change of plans. We still permit the old
-// way, but we also permit this hack: if a Header() key begins with
-// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
-// invalid token byte anyway, there is no ambiguity. (And it's already
-// filtered out.) It's mildly hacky, but not terrible.
-//
-// This method runs after the Handler is done and promotes any Header
-// fields to be trailers.
-func (rws *responseWriterState) promoteUndeclaredTrailers() {
- for k, vv := range rws.handlerHeader {
- if !strings.HasPrefix(k, TrailerPrefix) {
- continue
- }
- trailerKey := strings.TrimPrefix(k, TrailerPrefix)
- rws.declareTrailer(trailerKey)
- rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
- }
-
- if len(rws.trailers) > 1 {
- sorter := sorterPool.Get().(*sorter)
- sorter.SortStrings(rws.trailers)
- sorterPool.Put(sorter)
- }
-}
-
-func (w *responseWriter) Flush() {
- rws := w.rws
- if rws == nil {
- panic("Flush called after Handler finished")
- }
- if rws.bw.Buffered() > 0 {
- if err := rws.bw.Flush(); err != nil {
- // Ignore the error. The frame writer already knows.
- return
- }
- } else {
- // The bufio.Writer won't call chunkWriter.Write
- // (writeChunk) with zero bytes, so we have to do it
- // ourselves to force the HTTP response header and/or
- // final DATA frame (with END_STREAM) to be sent.
- rws.writeChunk(nil)
- }
-}
-
-func (w *responseWriter) CloseNotify() <-chan bool {
- rws := w.rws
- if rws == nil {
- panic("CloseNotify called after Handler finished")
- }
- rws.closeNotifierMu.Lock()
- ch := rws.closeNotifierCh
- if ch == nil {
- ch = make(chan bool, 1)
- rws.closeNotifierCh = ch
- cw := rws.stream.cw
- go func() {
- cw.Wait() // wait for close
- ch <- true
- }()
- }
- rws.closeNotifierMu.Unlock()
- return ch
-}
-
-func (w *responseWriter) Header() http.Header {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.handlerHeader == nil {
- rws.handlerHeader = make(http.Header)
- }
- return rws.handlerHeader
-}
-
-func (w *responseWriter) WriteHeader(code int) {
- rws := w.rws
- if rws == nil {
- panic("WriteHeader called after Handler finished")
- }
- rws.writeHeader(code)
-}
-
-func (rws *responseWriterState) writeHeader(code int) {
- if !rws.wroteHeader {
- rws.wroteHeader = true
- rws.status = code
- if len(rws.handlerHeader) > 0 {
- rws.snapHeader = cloneHeader(rws.handlerHeader)
- }
- }
-}
-
-func cloneHeader(h http.Header) http.Header {
- h2 := make(http.Header, len(h))
- for k, vv := range h {
- vv2 := make([]string, len(vv))
- copy(vv2, vv)
- h2[k] = vv2
- }
- return h2
-}
-
-// The Life Of A Write is like this:
-//
-// * Handler calls w.Write or w.WriteString ->
-// * -> rws.bw (*bufio.Writer) ->
-// * (Handler might call Flush)
-// * -> chunkWriter{rws}
-// * -> responseWriterState.writeChunk(p []byte)
-// * -> responseWriterState.writeChunk (most of the magic; see comment there)
-func (w *responseWriter) Write(p []byte) (n int, err error) {
- return w.write(len(p), p, "")
-}
-
-func (w *responseWriter) WriteString(s string) (n int, err error) {
- return w.write(len(s), nil, s)
-}
-
-// either dataB or dataS is non-zero.
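From a handler's point of view, the two trailer mechanisms described above look like this (sketch; Checksum and Grpc-Status are example field names, and the cert/key paths are hypothetical):

    package main

    import (
        "io"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        // Predeclared trailer: announced before the header is flushed.
        w.Header().Set("Trailer", "Checksum")

        io.WriteString(w, "body bytes")

        // Value for the predeclared trailer, set after the body.
        w.Header().Set("Checksum", "abc123")
        // Undeclared trailer via the magic prefix; the field goes out as "Grpc-Status".
        w.Header().Set(http2.TrailerPrefix+"Grpc-Status", "0")
    }

    func main() {
        http.HandleFunc("/", handler)
        log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
    }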
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - rws.handlerDone = true - w.Flush() - w.rws = nil - responseWriterStatePool.Put(rws) -} - -// Push errors. -var ( - ErrRecursivePush = errors.New("http2: recursive push not allowed") - ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") -) - -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. -type pushOptions struct { - Method string - Header http.Header -} - -func (w *responseWriter) push(target string, opts pushOptions) error { - st := w.rws.stream - sc := st.sc - sc.serveG.checkNotOn() - - // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." - // http://tools.ietf.org/html/rfc7540#section-6.6 - if st.isPushed() { - return ErrRecursivePush - } - - // Default options. - if opts.Method == "" { - opts.Method = "GET" - } - if opts.Header == nil { - opts.Header = http.Header{} - } - wantScheme := "http" - if w.rws.req.TLS != nil { - wantScheme = "https" - } - - // Validate the request. - u, err := url.Parse(target) - if err != nil { - return err - } - if u.Scheme == "" { - if !strings.HasPrefix(target, "/") { - return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) - } - u.Scheme = wantScheme - u.Host = w.rws.req.Host - } else { - if u.Scheme != wantScheme { - return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) - } - if u.Host == "" { - return errors.New("URL must have a host") - } - } - for k := range opts.Header { - if strings.HasPrefix(k, ":") { - return fmt.Errorf("promised request headers cannot include pseudo header %q", k) - } - // These headers are meaningful only if the request has a body, - // but PUSH_PROMISE requests cannot have a body. - // http://tools.ietf.org/html/rfc7540#section-8.2 - // Also disallow Host, since the promised URL must be absolute. 
-		switch strings.ToLower(k) {
-		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
-			return fmt.Errorf("promised request headers cannot include %q", k)
-		}
-	}
-	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
-		return err
-	}
-
-	// The RFC effectively limits promised requests to GET and HEAD:
-	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
-	// http://tools.ietf.org/html/rfc7540#section-8.2
-	if opts.Method != "GET" && opts.Method != "HEAD" {
-		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
-	}
-
-	msg := startPushRequest{
-		parent: st,
-		method: opts.Method,
-		url:    u,
-		header: cloneHeader(opts.Header),
-		done:   errChanPool.Get().(chan error),
-	}
-
-	select {
-	case <-sc.doneServing:
-		return errClientDisconnected
-	case <-st.cw:
-		return errStreamClosed
-	case sc.wantStartPushCh <- msg:
-	}
-
-	select {
-	case <-sc.doneServing:
-		return errClientDisconnected
-	case <-st.cw:
-		return errStreamClosed
-	case err := <-msg.done:
-		errChanPool.Put(msg.done)
-		return err
-	}
-}
-
-type startPushRequest struct {
-	parent *stream
-	method string
-	url    *url.URL
-	header http.Header
-	done   chan error
-}
-
-func (sc *serverConn) startPush(msg startPushRequest) {
-	sc.serveG.check()
-
-	// http://tools.ietf.org/html/rfc7540#section-6.6.
-	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
-	// is in either the "open" or "half-closed (remote)" state.
-	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
-		// responseWriter.Push checks that the stream is peer-initiated.
-		msg.done <- errStreamClosed
-		return
-	}
-
-	// http://tools.ietf.org/html/rfc7540#section-6.6.
-	if !sc.pushEnabled {
-		msg.done <- http.ErrNotSupported
-		return
-	}
-
-	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
-	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
-	// is written. Once the ID is allocated, we start the request handler.
-	allocatePromisedID := func() (uint32, error) {
-		sc.serveG.check()
-
-		// Check this again, just in case. Technically, we might have received
-		// an updated SETTINGS by the time we got around to writing this frame.
-		if !sc.pushEnabled {
-			return 0, http.ErrNotSupported
-		}
-		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
-		if sc.curPushedStreams+1 > sc.clientMaxStreams {
-			return 0, ErrPushLimitReached
-		}
-
-		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
-		// Streams initiated by the server MUST use even-numbered identifiers.
-		// A server that is unable to establish a new stream identifier can send a GOAWAY
-		// frame so that the client is forced to open a new connection for new streams.
-		if sc.maxPushPromiseID+2 >= 1<<31 {
-			sc.startGracefulShutdown()
-			return 0, ErrPushLimitReached
-		}
-		sc.maxPushPromiseID += 2
-		promisedID := sc.maxPushPromiseID
-
-		// http://tools.ietf.org/html/rfc7540#section-8.2.
-		// Strictly speaking, the new stream should start in "reserved (local)", then
-		// transition to "half closed (remote)" after sending the initial HEADERS, but
-		// we start in "half closed (remote)" for simplicity.
-		// See further comments at the definition of stateHalfClosedRemote.
- promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE - }) - if err != nil { - // Should not happen, since we've already validated msg.url. - panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) - return promisedID, nil - } - - sc.writeFrame(FrameWriteRequest{ - write: &writePushPromise{ - streamID: msg.parent.id, - method: msg.method, - url: msg.url, - h: msg.header, - allocatePromisedID: allocatePromisedID, - }, - stream: msg.parent, - done: msg.done, - }) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. -func checkValidHTTP2RequestHeaders(h http.Header) error { - for _, k := range connHeaders { - if _, ok := h[k]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", k) - } - } - te := h["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - -// h1ServerShutdownChan returns a channel that will be closed when the -// provided *http.Server wants to shut down. -// -// This is a somewhat hacky way to get at http1 innards. It works -// when the http2 code is bundled into the net/http package in the -// standard library. The alternatives ended up making the cmd/go tool -// depend on http Servers. This is the lightest option for now. -// This is tested via the TestServeShutdown* tests in net/http. 
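For illustration only, not from the vendored file: the header rules above in action. ValidTrailerHeader is exported by this package; checkValidHTTP2RequestHeaders is package-private, so this sketch assumes it runs inside the package.

    // Sketch only; assumes import: net/http.
    func headerRules() {
    	_ = ValidTrailerHeader("X-Checksum")     // true
    	_ = ValidTrailerHeader("Content-Length") // false: framing-related
    	_ = ValidTrailerHeader("If-Match")       // false: "If-" prefix is reserved
    
    	h := http.Header{"Te": {"gzip"}}
    	err := checkValidHTTP2RequestHeaders(h) // non-nil: TE may only be "trailers"
    	_ = err
    }
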
-func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { - if fn := testh1ServerShutdownChan; fn != nil { - return fn(hs) - } - var x interface{} = hs - type I interface { - getDoneChan() <-chan struct{} - } - if hs, ok := x.(I); ok { - return hs.getDoneChan() - } - return nil -} - -// optional test hook for h1ServerShutdownChan. -var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} - -// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives -// disabled. See comments on h1ServerShutdownChan above for why -// the code is written this way. -func h1ServerKeepAlivesDisabled(hs *http.Server) bool { - var x interface{} = hs - type I interface { - doKeepAlives() bool - } - if hs, ok := x.(I); ok { - return !hs.doKeepAlives() - } - return false -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index 0c7e859db..000000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,2129 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/rand" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. 
-	AllowHTTP bool
-
-	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
-	// send in the initial settings frame. It is how many bytes
-	// of response headers are allowed. Unlike the http2 spec, zero here
-	// means to use a default limit (currently 10MB). If you actually
-	// want to advertise an unlimited value to the peer, Transport
-	// interprets the highest possible value here (0xffffffff or 1<<32-1)
-	// to mean no limit.
-	MaxHeaderListSize uint32
-
-	// t1, if non-nil, is the standard library Transport using
-	// this transport. Its settings are used (but not its
-	// RoundTrip method, etc).
-	t1 *http.Transport
-
-	connPoolOnce  sync.Once
-	connPoolOrDef ClientConnPool // non-nil version of ConnPool
-}
-
-func (t *Transport) maxHeaderListSize() uint32 {
-	if t.MaxHeaderListSize == 0 {
-		return 10 << 20
-	}
-	if t.MaxHeaderListSize == 0xffffffff {
-		return 0
-	}
-	return t.MaxHeaderListSize
-}
-
-func (t *Transport) disableCompression() bool {
-	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
-}
-
-var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
-
-// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
-// It requires Go 1.6 or later and returns an error if the net/http package is too old
-// or if t1 has already been HTTP/2-enabled.
-func ConfigureTransport(t1 *http.Transport) error {
-	_, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
-	return err
-}
-
-func (t *Transport) connPool() ClientConnPool {
-	t.connPoolOnce.Do(t.initConnPool)
-	return t.connPoolOrDef
-}
-
-func (t *Transport) initConnPool() {
-	if t.ConnPool != nil {
-		t.connPoolOrDef = t.ConnPool
-	} else {
-		t.connPoolOrDef = &clientConnPool{t: t}
-	}
-}
-
-// ClientConn is the state of a single HTTP/2 client connection to an
-// HTTP/2 server.
-type ClientConn struct {
-	t         *Transport
-	tconn     net.Conn             // usually *tls.Conn, except specialized impls
-	tlsState  *tls.ConnectionState // nil only for specialized impls
-	singleUse bool                 // whether being used for a single http.Request
-
-	// readLoop goroutine fields:
-	readerDone chan struct{} // closed on error
-	readerErr  error         // set before readerDone is closed
-
-	idleTimeout time.Duration // or 0 for never
-	idleTimer   *time.Timer
-
-	mu              sync.Mutex // guards following
-	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
-	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
-	inflow          flow       // peer's conn-level flow control
-	closed          bool
-	wantSettingsAck bool                      // we sent a SETTINGS frame and haven't heard back
-	goAway          *GoAwayFrame              // if non-nil, the GoAwayFrame we received
-	goAwayDebug     string                    // goAway frame's debug data, retained as a string
-	streams         map[uint32]*clientStream  // client-initiated
-	nextStreamID    uint32
-	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
-	bw              *bufio.Writer
-	br              *bufio.Reader
-	fr              *Framer
-	lastActive      time.Time
-	// Settings from peer: (also guarded by mu)
-	maxFrameSize         uint32
-	maxConcurrentStreams uint32
-	initialWindowSize    uint32
-
-	hbuf    bytes.Buffer // HPACK encoder writes into this
-	henc    *hpack.Encoder
-	freeBuf [][]byte
-
-	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
-	werr error      // first write error that has occurred
-}
-
-// clientStream is the state for a single HTTP/2 stream. One of these
-// is created for each Transport.RoundTrip call.
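A minimal wiring sketch for the knobs documented above, not part of the vendored source; values are examples, not recommendations.

    // Sketch only; assumes imports: crypto/tls, log, net/http,
    // golang.org/x/net/http2.
    t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
    if err := http2.ConfigureTransport(t1); err != nil { // Go 1.6+
    	log.Fatal(err)
    }
    t2 := &http2.Transport{
    	MaxHeaderListSize:  1 << 20, // advertise 1MB; 0 would mean the 10MB default
    	DisableCompression: true,    // don't auto-request gzip
    }
    _ = t2
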
-type clientStream struct {
-	cc            *ClientConn
-	req           *http.Request
-	trace         *clientTrace // or nil
-	ID            uint32
-	resc          chan resAndError
-	bufPipe       pipe   // buffered pipe with the flow-controlled response payload
-	startedWrite  bool   // started request body write; guarded by cc.mu
-	requestedGzip bool
-	on100         func() // optional code to run if we get a 100 continue response
-
-	flow        flow  // guarded by cc.mu
-	inflow      flow  // guarded by cc.mu
-	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
-	readErr     error // sticky read error; owned by transportResponseBody.Read
-	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
-	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu
-
-	peerReset chan struct{} // closed on peer reset
-	resetErr  error         // populated before peerReset is closed
-
-	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
-
-	// owned by clientConnReadLoop:
-	firstByte    bool // got the first response byte
-	pastHeaders  bool // got first MetaHeadersFrame (actual headers)
-	pastTrailers bool // got optional second MetaHeadersFrame (trailers)
-
-	trailer    http.Header  // accumulated trailers
-	resTrailer *http.Header // client's Response.Trailer
-}
-
-// awaitRequestCancel runs in its own goroutine and waits for the user
-// to cancel a RoundTrip request, its context to expire, or for the
-// request to be done (any way it might be removed from the cc.streams
-// map: peer reset, successful completion, TCP connection breakage,
-// etc).
-func (cs *clientStream) awaitRequestCancel(req *http.Request) {
-	ctx := reqContext(req)
-	if req.Cancel == nil && ctx.Done() == nil {
-		return
-	}
-	select {
-	case <-req.Cancel:
-		cs.cancelStream()
-		cs.bufPipe.CloseWithError(errRequestCanceled)
-	case <-ctx.Done():
-		cs.cancelStream()
-		cs.bufPipe.CloseWithError(ctx.Err())
-	case <-cs.done:
-	}
-}
-
-func (cs *clientStream) cancelStream() {
-	cs.cc.mu.Lock()
-	didReset := cs.didReset
-	cs.didReset = true
-	cs.cc.mu.Unlock()
-
-	if !didReset {
-		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
-	}
-}
-
-// checkResetOrDone reports any error sent in a RST_STREAM frame by the
-// server, or errStreamClosed if the stream is complete.
-func (cs *clientStream) checkResetOrDone() error {
-	select {
-	case <-cs.peerReset:
-		return cs.resetErr
-	case <-cs.done:
-		return errStreamClosed
-	default:
-		return nil
-	}
-}
-
-func (cs *clientStream) abortRequestBodyWrite(err error) {
-	if err == nil {
-		panic("nil error")
-	}
-	cc := cs.cc
-	cc.mu.Lock()
-	cs.stopReqBody = err
-	cc.cond.Broadcast()
-	cc.mu.Unlock()
-}
-
-type stickyErrWriter struct {
-	w   io.Writer
-	err *error
-}
-
-func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
-	if *sew.err != nil {
-		return 0, *sew.err
-	}
-	n, err = sew.w.Write(p)
-	*sew.err = err
-	return
-}
-
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
-
-// RoundTripOpt are options for the Transport.RoundTripOpt method.
-type RoundTripOpt struct {
-	// OnlyCachedConn controls whether RoundTripOpt may
-	// create a new TCP connection. If set true and
-	// no cached connection is available, RoundTripOpt
-	// will return ErrNoCachedConn.
-	OnlyCachedConn bool
-}
-
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	return t.RoundTripOpt(req, RoundTripOpt{})
-}
-
-// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
-// to a host:port. The port 443 is added if needed.
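For illustration only, not part of the vendored source: the cancellation paths awaitRequestCancel watches, seen from the caller's side. The URL is made up.

    // Sketch only; assumes imports: context, net/http, time,
    // golang.org/x/net/http2.
    tr := &http2.Transport{}
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    req, _ := http.NewRequest("GET", "https://example.com/", nil)
    resp, err := tr.RoundTrip(req.WithContext(ctx)) // ctx expiry resets the stream
    // req.Cancel (a chan struct{}) is the older mechanism and is also honored.
    _, _ = resp, err
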
-func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if err != nil { - if req, err = shouldRetryRequest(req, err); err == nil { - continue - } - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") - errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") -) - -// shouldRetryRequest is called by RoundTrip when a request fails to get -// response headers. It is always called with a non-nil error. -// It returns either a request to retry (either the same request, or a -// modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { - switch err { - default: - return nil, err - case errClientConnUnusable, errClientConnGotGoAway: - return req, nil - case errClientConnGotGoAwayAfterSomeReqBody: - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. 
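A hedged sketch of the retry condition described above, not from the vendored file: with GetBody defined (Go 1.8+), a request whose body was partially written before a graceful GOAWAY can be replayed.

    // Sketch only; assumes imports: io, io/ioutil, net/http, strings.
    payload := "hello"
    req, _ := http.NewRequest("POST", "https://example.com/", strings.NewReader(payload))
    req.GetBody = func() (io.ReadCloser, error) {
    	return ioutil.NopCloser(strings.NewReader(payload)), nil
    }
    // Note: on Go 1.8+, http.NewRequest already sets GetBody for *strings.Reader
    // bodies, so this is only needed for custom readers.
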
- getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") - } - body, err := getBody() - if err != nil { - return nil, err - } - newReq := *req - newReq.Body = body - return &newReq, nil - } -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. 
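For illustration only, not part of the vendored source: a custom DialTLS must still negotiate "h2" via ALPN; dialTLSDefault above shows the checks the default dialer performs.

    // Sketch only; assumes imports: crypto/tls, net, golang.org/x/net/http2.
    tr := &http2.Transport{
    	DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
    		// cfg already carries NextProtos = ["h2"] and the ServerName.
    		cn, err := tls.Dial(network, addr, cfg)
    		if err != nil {
    			return nil, err
    		}
    		return cn, cn.Handshake()
    	},
    }
    _ = tr
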
-	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
-	cc.br = bufio.NewReader(c)
-	cc.fr = NewFramer(cc.bw, cc.br)
-	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
-	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
-
-	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
-	// henc in response to SETTINGS frames?
-	cc.henc = hpack.NewEncoder(&cc.hbuf)
-
-	if cs, ok := c.(connectionStater); ok {
-		state := cs.ConnectionState()
-		cc.tlsState = &state
-	}
-
-	initialSettings := []Setting{
-		{ID: SettingEnablePush, Val: 0},
-		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
-	}
-	if max := t.maxHeaderListSize(); max != 0 {
-		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
-	}
-
-	cc.bw.Write(clientPreface)
-	cc.fr.WriteSettings(initialSettings...)
-	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
-	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
-	cc.bw.Flush()
-	if cc.werr != nil {
-		return nil, cc.werr
-	}
-
-	go cc.readLoop()
-	return cc, nil
-}
-
-func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-
-	old := cc.goAway
-	cc.goAway = f
-
-	// Merge the previous and current GoAway error frames.
-	if cc.goAwayDebug == "" {
-		cc.goAwayDebug = string(f.DebugData())
-	}
-	if old != nil && old.ErrCode != ErrCodeNo {
-		cc.goAway.ErrCode = old.ErrCode
-	}
-	last := f.LastStreamID
-	for streamID, cs := range cc.streams {
-		if streamID > last {
-			select {
-			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
-			default:
-			}
-		}
-	}
-}
-
-func (cc *ClientConn) CanTakeNewRequest() bool {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	return cc.canTakeNewRequestLocked()
-}
-
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
-	if cc.singleUse && cc.nextStreamID > 1 {
-		return false
-	}
-	return cc.goAway == nil && !cc.closed &&
-		int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
-		cc.nextStreamID < math.MaxInt32
-}
-
-// onIdleTimeout is called from a time.AfterFunc goroutine. It will
-// only be called when we're idle, but because we're coming from a new
-// goroutine, there could be a new request coming in at the same time,
-// so this simply calls the synchronized closeIfIdle to shut down this
-// connection. The timer could just call closeIfIdle, but this is more
-// clear.
-func (cc *ClientConn) onIdleTimeout() {
-	cc.closeIfIdle()
-}
-
-func (cc *ClientConn) closeIfIdle() {
-	cc.mu.Lock()
-	if len(cc.streams) > 0 {
-		cc.mu.Unlock()
-		return
-	}
-	cc.closed = true
-	nextID := cc.nextStreamID
-	// TODO: do clients send GOAWAY too? maybe? Just Close:
-	cc.mu.Unlock()
-
-	if VerboseLogs {
-		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
-	}
-	cc.tconn.Close()
-}
-
-const maxAllocFrameSize = 512 << 10
-
-// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
-// They're capped at the min of the peer's max frame size or 512KB
-// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
-// buffers.
-func (cc *ClientConn) frameScratchBuffer() []byte {
-	cc.mu.Lock()
-	size := cc.maxFrameSize
-	if size > maxAllocFrameSize {
-		size = maxAllocFrameSize
-	}
-	for i, buf := range cc.freeBuf {
-		if len(buf) >= int(size) {
-			cc.freeBuf[i] = nil
-			cc.mu.Unlock()
-			return buf[:size]
-		}
-	}
-	cc.mu.Unlock()
-	return make([]byte, size)
-}
-
-func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
-	if len(cc.freeBuf) < maxBufs {
-		cc.freeBuf = append(cc.freeBuf, buf)
-		return
-	}
-	for i, old := range cc.freeBuf {
-		if old == nil {
-			cc.freeBuf[i] = buf
-			return
-		}
-	}
-	// forget about it.
-}
-
-// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
-// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
-var errRequestCanceled = errors.New("net/http: request canceled")
-
-func commaSeparatedTrailers(req *http.Request) (string, error) {
-	keys := make([]string, 0, len(req.Trailer))
-	for k := range req.Trailer {
-		k = http.CanonicalHeaderKey(k)
-		switch k {
-		case "Transfer-Encoding", "Trailer", "Content-Length":
-			return "", &badStringError{"invalid Trailer key", k}
-		}
-		keys = append(keys, k)
-	}
-	if len(keys) > 0 {
-		sort.Strings(keys)
-		return strings.Join(keys, ","), nil
-	}
-	return "", nil
-}
-
-func (cc *ClientConn) responseHeaderTimeout() time.Duration {
-	if cc.t.t1 != nil {
-		return cc.t.t1.ResponseHeaderTimeout
-	}
-	// No way to do this (yet?) with just an http2.Transport. Probably
-	// no need; Request.Cancel is the new way. We only need to support
-	// this for compatibility with the old http.Transport fields when
-	// we're doing transparent http2.
-	return 0
-}
-
-// checkConnHeaders checks whether req has any invalid connection-level headers,
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
-	if v := req.Header.Get("Upgrade"); v != "" {
-		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
-	}
-	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
-		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
-	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
-		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
-	}
-	return nil
-}
-
-// actualContentLength returns a sanitized version of
-// req.ContentLength, where 0 actually means zero (not unknown) and -1
-// means unknown.
-func actualContentLength(req *http.Request) int64 {
-	if req.Body == nil {
-		return 0
-	}
-	if req.ContentLength != 0 {
-		return req.ContentLength
-	}
-	return -1
-}
-
-func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
-	if err := checkConnHeaders(req); err != nil {
-		return nil, err
-	}
-	if cc.idleTimer != nil {
-		cc.idleTimer.Stop()
-	}
-
-	trailers, err := commaSeparatedTrailers(req)
-	if err != nil {
-		return nil, err
-	}
-	hasTrailers := trailers != ""
-
-	cc.mu.Lock()
-	cc.lastActive = time.Now()
-	if cc.closed || !cc.canTakeNewRequestLocked() {
-		cc.mu.Unlock()
-		return nil, errClientConnUnusable
-	}
-
-	body := req.Body
-	hasBody := body != nil
-	contentLen := actualContentLength(req)
-
-	// TODO(bradfitz): this is a copy of the logic in net/http.
Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. 
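For illustration only, not part of the vendored source: the practical consequences of the gzip decision above.

    // Sketch only; assumes import: net/http.
    req, _ := http.NewRequest("GET", "https://example.com/", nil)
    // Leave Accept-Encoding unset: the transport asks for gzip itself and
    // transparently decodes the body (Content-Encoding/Length are cleared).
    // Set it explicitly and you must decode the body yourself:
    req.Header.Set("Accept-Encoding", "gzip")
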
- bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - if re.err == errClientConnGotGoAway { - cc.mu.Lock() - if cs.startedWrite { - re.err = errClientConnGotGoAwayAfterSomeReqBody - } - cc.mu.Unlock() - } - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errTimeout - case <-ctx.Done(): - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, ctx.Err() - case <-req.Cancel: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. 
-	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
-)
-
-func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
-	cc := cs.cc
-	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
-	buf := cc.frameScratchBuffer()
-	defer cc.putFrameScratchBuffer(buf)
-
-	defer func() {
-		traceWroteRequest(cs.trace, err)
-		// TODO: write h12Compare test showing whether
-		// Request.Body is closed by the Transport,
-		// and in multiple cases: server replies <=299 and >299
-		// while still writing request body
-		cerr := bodyCloser.Close()
-		if err == nil {
-			err = cerr
-		}
-	}()
-
-	req := cs.req
-	hasTrailers := req.Trailer != nil
-
-	var sawEOF bool
-	for !sawEOF {
-		n, err := body.Read(buf)
-		if err == io.EOF {
-			sawEOF = true
-			err = nil
-		} else if err != nil {
-			return err
-		}
-
-		remain := buf[:n]
-		for len(remain) > 0 && err == nil {
-			var allowed int32
-			allowed, err = cs.awaitFlowControl(len(remain))
-			switch {
-			case err == errStopReqBodyWrite:
-				return err
-			case err == errStopReqBodyWriteAndCancel:
-				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
-				return err
-			case err != nil:
-				return err
-			}
-			cc.wmu.Lock()
-			data := remain[:allowed]
-			remain = remain[allowed:]
-			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
-			err = cc.fr.WriteData(cs.ID, sentEnd, data)
-			if err == nil {
-				// TODO(bradfitz): this flush is for latency, not bandwidth.
-				// Most requests won't need this. Make this opt-in or
-				// opt-out? Use some heuristic on the body type? Nagle-like
-				// timers? Based on 'n'? Only last chunk of this for loop,
-				// unless flow control tokens are low? For now, always.
-				// If we change this, see comment below.
-				err = cc.bw.Flush()
-			}
-			cc.wmu.Unlock()
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	if sentEnd {
-		// Already sent END_STREAM (which implies we have no
-		// trailers) and flushed, because currently all
-		// WriteData frames above get a flush. So we're done.
-		return nil
-	}
-
-	var trls []byte
-	if hasTrailers {
-		cc.mu.Lock()
-		defer cc.mu.Unlock()
-		trls = cc.encodeTrailers(req)
-	}
-
-	cc.wmu.Lock()
-	defer cc.wmu.Unlock()
-
-	// Two ways to send END_STREAM: either with trailers, or
-	// with an empty DATA frame.
-	if len(trls) > 0 {
-		err = cc.writeHeaders(cs.ID, true, trls)
-	} else {
-		err = cc.fr.WriteData(cs.ID, true, nil)
-	}
-	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
-		err = ferr
-	}
-	return err
-}
-
-// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
-// control tokens from the server.
-// It returns either the non-zero number of tokens taken or an error
-// if the stream is dead.
-func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
-	cc := cs.cc
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	for {
-		if cc.closed {
-			return 0, errClientConnClosed
-		}
-		if cs.stopReqBody != nil {
-			return 0, cs.stopReqBody
-		}
-		if err := cs.checkResetOrDone(); err != nil {
-			return 0, err
-		}
-		if a := cs.flow.available(); a > 0 {
-			take := a
-			if int(take) > maxBytes {
-				take = int32(maxBytes) // can't truncate int; take is int32
-			}
-			if take > int32(cc.maxFrameSize) {
-				take = int32(cc.maxFrameSize)
-			}
-			cs.flow.take(take)
-			return take, nil
-		}
-		cc.cond.Wait()
-	}
-}
-
-type badStringError struct {
-	what string
-	str  string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
-// requires cc.mu be held.
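A hedged sketch, not from the vendored file: request trailers ride the body-writing machinery above. Keys must exist before RoundTrip (commaSeparatedTrailers declares them); values may be filled in while the body streams.

    // Sketch only; assumes imports: net/http, strings.
    req, _ := http.NewRequest("POST", "https://example.com/", strings.NewReader("data"))
    req.Trailer = http.Header{"X-Checksum": nil} // declared now, value pending
    // ... once the body has been produced:
    req.Trailer.Set("X-Checksum", "abc123")
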
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httplex.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. 
-	switch method {
-	case "POST", "PUT", "PATCH":
-		return true
-	default:
-		return false
-	}
-}
-
-// requires cc.mu be held.
-func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
-	cc.hbuf.Reset()
-	for k, vv := range req.Trailer {
-		// Transfer-Encoding, etc. have already been filtered at the
-		// start of RoundTrip
-		lowKey := strings.ToLower(k)
-		for _, v := range vv {
-			cc.writeHeader(lowKey, v)
-		}
-	}
-	return cc.hbuf.Bytes()
-}
-
-func (cc *ClientConn) writeHeader(name, value string) {
-	if VerboseLogs {
-		log.Printf("http2: Transport encoding header %q = %q", name, value)
-	}
-	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
-}
-
-type resAndError struct {
-	res *http.Response
-	err error
-}
-
-// requires cc.mu be held.
-func (cc *ClientConn) newStream() *clientStream {
-	cs := &clientStream{
-		cc:        cc,
-		ID:        cc.nextStreamID,
-		resc:      make(chan resAndError, 1),
-		peerReset: make(chan struct{}),
-		done:      make(chan struct{}),
-	}
-	cs.flow.add(int32(cc.initialWindowSize))
-	cs.flow.setConnFlow(&cc.flow)
-	cs.inflow.add(transportDefaultStreamFlow)
-	cs.inflow.setConnFlow(&cc.inflow)
-	cc.nextStreamID += 2
-	cc.streams[cs.ID] = cs
-	return cs
-}
-
-func (cc *ClientConn) forgetStreamID(id uint32) {
-	cc.streamByID(id, true)
-}
-
-func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
-	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	cs := cc.streams[id]
-	if andRemove && cs != nil && !cc.closed {
-		cc.lastActive = time.Now()
-		delete(cc.streams, id)
-		if len(cc.streams) == 0 && cc.idleTimer != nil {
-			cc.idleTimer.Reset(cc.idleTimeout)
-		}
-		close(cs.done)
-		cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
-	}
-	return cs
-}
-
-// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
-type clientConnReadLoop struct {
-	cc            *ClientConn
-	activeRes     map[uint32]*clientStream // keyed by streamID
-	closeWhenIdle bool
-}
-
-// readLoop runs in its own goroutine and reads and dispatches frames.
-func (cc *ClientConn) readLoop() {
-	rl := &clientConnReadLoop{
-		cc:        cc,
-		activeRes: make(map[uint32]*clientStream),
-	}
-
-	defer rl.cleanup()
-	cc.readerErr = rl.run()
-	if ce, ok := cc.readerErr.(ConnectionError); ok {
-		cc.wmu.Lock()
-		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
-		cc.wmu.Unlock()
-	}
-}
-
-// GoAwayError is returned by the Transport when the server closes the
-// TCP connection after sending a GOAWAY frame.
-type GoAwayError struct {
-	LastStreamID uint32
-	ErrCode      ErrCode
-	DebugData    string
-}
-
-func (e GoAwayError) Error() string {
-	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
-		e.LastStreamID, e.ErrCode, e.DebugData)
-}
-
-func isEOFOrNetReadError(err error) bool {
-	if err == io.EOF {
-		return true
-	}
-	ne, ok := err.(*net.OpError)
-	return ok && ne.Op == "read"
-}
-
-func (rl *clientConnReadLoop) cleanup() {
-	cc := rl.cc
-	defer cc.tconn.Close()
-	defer cc.t.connPool().MarkDead(cc)
-	defer close(cc.readerDone)
-
-	if cc.idleTimer != nil {
-		cc.idleTimer.Stop()
-	}
-
-	// Close any response bodies if the server closes prematurely.
-	// TODO: also do this if we've written the headers but not
-	// gotten a response yet.
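For illustration only, not part of the vendored source: GoAwayError (exported above) surfaces the server's parting debug data to callers after an abrupt close.

    // Sketch only; assumes imports: log, net/http, golang.org/x/net/http2;
    // tr is an *http2.Transport and req an *http.Request from elsewhere.
    if _, err := tr.RoundTrip(req); err != nil {
    	if ga, ok := err.(http2.GoAwayError); ok {
    		log.Printf("GOAWAY: last stream %d, code %v, debug %q",
    			ga.LastStreamID, ga.ErrCode, ga.DebugData)
    	}
    }
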
- err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. 
- return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. 
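A hedged usage note, not from the vendored file: trailers collected by processTrailers above become visible on the client side only after the body has been read to EOF, matching net/http's Response.Trailer contract.

    // Sketch only; assumes imports: io, io/ioutil, log; tr and req as before.
    resp, err := tr.RoundTrip(req)
    if err == nil {
    	io.Copy(ioutil.Discard, resp.Body) // drain to EOF so trailers arrive
    	resp.Body.Close()
    	log.Println(resp.Trailer.Get("X-Checksum")) // empty until EOF was seen
    }
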
- return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. - v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. 
- - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if f.Length > 0 { - if len(data) > 0 && cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - cs.inflow.add(pad) - cc.inflow.add(pad) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(pad)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) - cc.bw.Flush() - cc.wmu.Unlock() - } - didReset := cs.didReset - cc.mu.Unlock() - - if len(data) > 0 && !didReset { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. - rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
- cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -// Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { - c := make(chan struct{}) - // Generate a random payload - var p [8]byte - for { - if _, err := rand.Read(p[:]); err != nil { - return err - } - cc.mu.Lock() - // check for dup before insert - if _, found := cc.pings[p]; !found { - cc.pings[p] = c - cc.mu.Unlock() - break - } - cc.mu.Unlock() - } - cc.wmu.Lock() - if err := cc.fr.WritePing(false, p); err != nil { - cc.wmu.Unlock() - return err - } - if err := cc.bw.Flush(); err != nil { - cc.wmu.Unlock() - return err - } - cc.wmu.Unlock() - select { - case <-c: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-cc.readerDone: - // connection closed - return cc.readerErr - } -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - // If ack, notify listener if any - if c, ok := cc.pings[f.Data]; ok { - close(c) - delete(cc.pings, f.Data) - } - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). 
- cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". -type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - cs.cc.mu.Lock() - cs.startedWrite = true - cs.cc.mu.Unlock() - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httplex.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. 
- return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 6b0dfae31..000000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "net/url" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error - - // staysWithinBuffer reports whether this writer promises that - // it will only write less than or equal to size bytes, and it - // won't Flush the write context. - staysWithinBuffer(size int) bool -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// writeEndsStream reports whether w writes a frame that will transition -// the stream to a half-closed local state. This returns false for RST_STREAM, -// which closes the entire stream (not just the local half). -func writeEndsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. 
- panic("writeEndsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } - -type writeSettings []Setting - -func (s writeSettings) staysWithinBuffer(max int) bool { - const settingSize = 6 // uint16 + uint32 - return frameHeaderLen+settingSize*len(s) <= max - -} - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -func (w *writeData) staysWithinBuffer(max int) bool { - return frameHeaderLen+len(w.p) <= max -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } - -// splitHeaderBlock splits headerBlock into fragments so that each fragment fits -// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true -// for the first/last fragment, respectively. -func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. 
- const maxFrameSize = 16384
-
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
- return err
- }
- first = false
- }
- return nil
-}
-
-// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
-// for HTTP response headers or trailers from a server handler.
-type writeResHeaders struct {
- streamID uint32
- httpResCode int // 0 means no ":status" line
- h http.Header // may be nil
- trailers []string // if non-nil, which keys of h to write. nil means all.
- endStream bool
-
- date string
- contentType string
- contentLength string
-}
-
-func encKV(enc *hpack.Encoder, k, v string) {
- if VerboseLogs {
- log.Printf("http2: server encoding header %q = %q", k, v)
- }
- enc.WriteField(hpack.HeaderField{Name: k, Value: v})
-}
-
-func (w *writeResHeaders) staysWithinBuffer(max int) bool {
- // TODO: this is a common one. It'd be nice to return true
- // here and get into the fast path if we could be clever and
- // calculate the size fast enough, or at least a conservative
- // upper bound that usually fires. (Maybe if w.h and
- // w.trailers are nil, so we don't need to enumerate it.)
- // Otherwise I'm afraid that just calculating the length to
- // answer this question would be slower than the ~2µs benefit.
- return false
-}
-
-func (w *writeResHeaders) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- if w.httpResCode != 0 {
- encKV(enc, ":status", httpCodeString(w.httpResCode))
- }
-
- encodeHeaders(enc, w.h, w.trailers)
-
- if w.contentType != "" {
- encKV(enc, "content-type", w.contentType)
- }
- if w.contentLength != "" {
- encKV(enc, "content-length", w.contentLength)
- }
- if w.date != "" {
- encKV(enc, "date", w.date)
- }
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 && w.trailers == nil {
- panic("unexpected empty hpack")
- }
-
- return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
-}
-
-func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
- if firstFrag {
- return ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: lastFrag,
- })
- } else {
- return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
- }
-}
-
-// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
-type writePushPromise struct {
- streamID uint32 // pusher stream
- method string // for :method
- url *url.URL // for :scheme, :authority, :path
- h http.Header
-
- // Creates an ID for a pushed stream. This runs on serveG just before
- // the frame is written. The returned ID is copied to promisedID.
- allocatePromisedID func() (uint32, error)
- promisedID uint32
-}
-
-func (w *writePushPromise) staysWithinBuffer(max int) bool {
- // TODO: see writeResHeaders.staysWithinBuffer
- return false
-}
-
-func (w *writePushPromise) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- encKV(enc, ":method", w.method)
- encKV(enc, ":scheme", w.url.Scheme)
- encKV(enc, ":authority", w.url.Host)
- encKV(enc, ":path", w.url.RequestURI())
- encodeHeaders(enc, w.h, nil)
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 {
- panic("unexpected empty hpack")
- }
-
- return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
-}
-
-func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
- if firstFrag {
- return ctx.Framer().WritePushPromise(PushPromiseParam{
- StreamID: w.streamID,
- PromiseID: w.promisedID,
- BlockFragment: frag,
- EndHeaders: lastFrag,
- })
- } else {
- return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
- }
-}
-
-type write100ContinueHeadersFrame struct {
- streamID uint32
-}
-
-func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
- encKV(enc, ":status", "100")
- return ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: buf.Bytes(),
- EndStream: false,
- EndHeaders: true,
- })
-}
-
-func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
- // Sloppy but conservative:
- return 9+2*(len(":status")+len("100")) <= max
-}
-
-type writeWindowUpdate struct {
- streamID uint32 // or 0 for conn-level
- n uint32
-}
-
-func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
-
-func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
-}
-
-// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
-// is encoded only if k is in keys.
-func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
- if keys == nil {
- sorter := sorterPool.Get().(*sorter)
- // Using defer here, since the returned keys from the
- // sorter.Keys method is only valid until the sorter
- // is returned:
- defer sorterPool.Put(sorter)
- keys = sorter.Keys(h)
- }
- for _, k := range keys {
- vv := h[k]
- k = lowerHeader(k)
- if !validWireHeaderFieldName(k) {
- // Skip it as backup paranoia. Per
- // golang.org/issue/14048, these should
- // already be rejected at a higher level.
- continue
- }
- isTE := k == "transfer-encoding"
- for _, v := range vv {
- if !httplex.ValidHeaderFieldValue(v) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
- if isTE && v != "trailers" {
- continue
- }
- encKV(enc, k, v)
- }
- }
-}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
deleted file mode 100644
index 4fe307307..000000000
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import "fmt"
-
-// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
-// Methods are never called concurrently.
-type WriteScheduler interface {
- // OpenStream opens a new stream in the write scheduler.
- // It is illegal to call this with streamID=0 or with a streamID that is - // already open -- the call may panic. - OpenStream(streamID uint32, options OpenStreamOptions) - - // CloseStream closes a stream in the write scheduler. Any frames queued on - // this stream should be discarded. It is illegal to call this on a stream - // that is not open -- the call may panic. - CloseStream(streamID uint32) - - // AdjustStream adjusts the priority of the given stream. This may be called - // on a stream that has not yet been opened or has been closed. Note that - // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: - // https://tools.ietf.org/html/rfc7540#section-5.1 - AdjustStream(streamID uint32, priority PriorityParam) - - // Push queues a frame in the scheduler. In most cases, this will not be - // called with wr.StreamID()!=0 unless that stream is currently open. The one - // exception is RST_STREAM frames, which may be sent on idle or closed streams. - Push(wr FrameWriteRequest) - - // Pop dequeues the next frame to write. Returns false if no frames can - // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd. - Pop() (wr FrameWriteRequest, ok bool) -} - -// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. -type OpenStreamOptions struct { - // PusherID is zero if the stream was initiated by the client. Otherwise, - // PusherID names the stream that pushed the newly opened stream. - PusherID uint32 -} - -// FrameWriteRequest is a request to write a frame. -type FrameWriteRequest struct { - // write is the interface value that does the writing, once the - // WriteScheduler has selected this frame to write. The write - // functions are all defined in write.go. - write writeFramer - - // stream is the stream on which this frame will be written. - // nil for non-stream frames like PING and SETTINGS. - stream *stream - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// StreamID returns the id of the stream this frame will be written to. -// 0 is used for non-stream frames such as PING and SETTINGS. -func (wr FrameWriteRequest) StreamID() uint32 { - if wr.stream == nil { - if se, ok := wr.write.(StreamError); ok { - // (*serverConn).resetStream doesn't set - // stream because it doesn't necessarily have - // one. So special case this type of write - // message. - return se.StreamID - } - return 0 - } - return wr.stream.id -} - -// DataSize returns the number of flow control bytes that must be consumed -// to write this entire frame. This is 0 for non-DATA frames. -func (wr FrameWriteRequest) DataSize() int { - if wd, ok := wr.write.(*writeData); ok { - return len(wd.p) - } - return 0 -} - -// Consume consumes min(n, available) bytes from this frame, where available -// is the number of flow control bytes available on the stream. Consume returns -// 0, 1, or 2 frames, where the integer return value gives the number of frames -// returned. -// -// If flow control prevents consuming any bytes, this returns (_, _, 0). If -// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this -// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and -// 'rest' contains the remaining bytes. The consumed bytes are deducted from the -// underlying stream's flow control budget. 
-func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { - var empty FrameWriteRequest - - // Non-DATA frames are always consumed whole. - wd, ok := wr.write.(*writeData) - if !ok || len(wd.p) == 0 { - return wr, empty, 1 - } - - // Might need to split after applying limits. - allowed := wr.stream.flow.available() - if n < allowed { - allowed = n - } - if wr.stream.sc.maxFrameSize < allowed { - allowed = wr.stream.sc.maxFrameSize - } - if allowed <= 0 { - return empty, empty, 0 - } - if len(wd.p) > int(allowed) { - wr.stream.flow.take(allowed) - consumed := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[:allowed], - // Even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false. - endStream: false, - }, - // Our caller is blocking on the final DATA frame, not - // this intermediate frame, so no need to wait. - done: nil, - } - rest := FrameWriteRequest{ - stream: wr.stream, - write: &writeData{ - streamID: wd.streamID, - p: wd.p[allowed:], - endStream: wd.endStream, - }, - done: wr.done, - } - return consumed, rest, 2 - } - - // The frame is consumed whole. - // NB: This cast cannot overflow because allowed is <= math.MaxInt32. - wr.stream.flow.take(int32(len(wd.p))) - return wr, empty, 1 -} - -// String is for debugging only. -func (wr FrameWriteRequest) String() string { - var des string - if s, ok := wr.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wr.write) - } - return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) -} - -// replyToWriter sends err to wr.done and panics if the send must block -// This does nothing if wr.done is nil. -func (wr *FrameWriteRequest) replyToWriter(err error) { - if wr.done == nil { - return - } - select { - case wr.done <- err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) - } - wr.write = nil // prevent use (assume it's tainted after wr.done send) -} - -// writeQueue is used by implementations of WriteScheduler. -type writeQueue struct { - s []FrameWriteRequest -} - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) -} - -func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] - return wr -} - -// consume consumes up to n bytes from q.s[0]. If the frame is -// entirely consumed, it is removed from the queue. If the frame -// is partially consumed, the frame is kept with the consumed -// bytes removed. Returns true iff any bytes were consumed. -func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { - return FrameWriteRequest{}, false - } - consumed, rest, numresult := q.s[0].Consume(n) - switch numresult { - case 0: - return FrameWriteRequest{}, false - case 1: - q.shift() - case 2: - q.s[0] = rest - } - return consumed, true -} - -type writeQueuePool []*writeQueue - -// put inserts an unused writeQueue into the pool. -func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} - } - q.s = q.s[:0] - *p = append(*p, q) -} - -// get returns an empty writeQueue. 
-func (p *writeQueuePool) get() *writeQueue {
- ln := len(*p)
- if ln == 0 {
- return new(writeQueue)
- }
- x := ln - 1
- q := (*p)[x]
- (*p)[x] = nil
- *p = (*p)[:x]
- return q
-}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
deleted file mode 100644
index 01132721b..000000000
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "fmt"
- "math"
- "sort"
-)
-
-// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
-
-// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
-type PriorityWriteSchedulerConfig struct {
- // MaxClosedNodesInTree controls the maximum number of closed streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // "It is possible for a stream to become closed while prioritization
- // information ... is in transit. ... This potentially creates suboptimal
- // prioritization, since the stream could be given a priority that is
- // different from what is intended. To avoid these problems, an endpoint
- // SHOULD retain stream prioritization state for a period after streams
- // become closed. The longer state is retained, the lower the chance that
- // streams are assigned incorrect or default priority values."
- MaxClosedNodesInTree int
-
- // MaxIdleNodesInTree controls the maximum number of idle streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // Similarly, streams that are in the "idle" state can be assigned
- // priority or become a parent of other streams. This allows for the
- // creation of a grouping node in the dependency tree, which enables
- // more flexible expressions of priority. Idle streams begin with a
- // default priority (Section 5.3.5).
- MaxIdleNodesInTree int
-
- // ThrottleOutOfOrderWrites enables write throttling to help ensure that
- // data is delivered in priority order. This works around a race where
- // stream B depends on stream A and both streams are about to call Write
- // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
- // write as much data from B as possible, but this is suboptimal because A
- // is a higher-priority stream. With throttling enabled, we write a small
- // amount of data from B to minimize the amount of bandwidth that B can
- // steal from A.
- ThrottleOutOfOrderWrites bool
-}
-
-// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
-// If cfg is nil, default options are used.
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
- if cfg == nil {
- // For justification of these defaults, see:
- // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
- cfg = &PriorityWriteSchedulerConfig{
- MaxClosedNodesInTree: 10,
- MaxIdleNodesInTree: 10,
- ThrottleOutOfOrderWrites: false,
- }
- }
-
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
- maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
- maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
- enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
- }
- ws.nodes[0] = &ws.root
- if cfg.ThrottleOutOfOrderWrites {
- ws.writeThrottleLimit = 1024
- } else {
- ws.writeThrottleLimit = math.MaxInt32
- }
- return ws
-}
-
-type priorityNodeState int
-
-const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
-)
-
-// priorityNode is a node in an HTTP/2 priority tree.
-// Each node is associated with a single stream ID.
-// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
-
- // These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
-}
-
-func (n *priorityNode) setParent(parent *priorityNode) {
- if n == parent {
- panic("setParent to self")
- }
- if n.parent == parent {
- return
- }
- // Unlink from current parent.
- if parent := n.parent; parent != nil {
- if n.prev == nil {
- parent.kids = n.next
- } else {
- n.prev.next = n.next
- }
- if n.next != nil {
- n.next.prev = n.prev
- }
- }
- // Link to new parent.
- // If parent=nil, remove n from the tree.
- // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
- n.parent = parent
- if parent == nil {
- n.next = nil
- n.prev = nil
- } else {
- n.next = parent.kids
- n.prev = nil
- if n.next != nil {
- n.next.prev = n
- }
- parent.kids = n
- }
-}
-
-func (n *priorityNode) addBytes(b int64) {
- n.bytes += b
- for ; n != nil; n = n.parent {
- n.subtreeBytes += b
- }
-}
-
-// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this function returns true and the
-// walk halts. tmp is used as scratch space for sorting.
-//
-// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
-// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
- if !n.q.empty() && f(n, openParent) {
- return true
- }
- if n.kids == nil {
- return false
- }
-
- // Don't consider the root "open" when updating openParent since
- // we can't send data frames on the root stream (only control frames).
- if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
- }
-
- // Common case: only one kid or all kids have the same weight.
- // Some clients don't use weights; other clients (like web browsers)
- // use mostly-linear priority trees.
- w := n.kids.weight - needSort := false - for k := n.kids.next; k != nil; k = k.next { - if k.weight != w { - needSort = true - break - } - } - if !needSort { - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false - } - - // Uncommon case: sort the child nodes. We remove the kids from the parent, - // then re-insert after sorting so we can reuse tmp for future sort calls. - *tmp = (*tmp)[:0] - for n.kids != nil { - *tmp = append(*tmp, n.kids) - n.kids.setParent(nil) - } - sort.Sort(sortPriorityNodeSiblings(*tmp)) - for i := len(*tmp) - 1; i >= 0; i-- { - (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids - } - for k := n.kids; k != nil; k = k.next { - if k.walkReadyInOrder(openParent, tmp, f) { - return true - } - } - return false -} - -type sortPriorityNodeSiblings []*priorityNode - -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { - // Prefer the subtree that has sent fewer bytes relative to its weight. - // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) - if bi == 0 && bk == 0 { - return wi >= wk - } - if bk == 0 { - return false - } - return bi/bk <= wi/wk -} - -type priorityWriteScheduler struct { - // root is the root of the priority tree, where root.id = 0. - // The root queues control frames that are not associated with any stream. - root priorityNode - - // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode - - // maxID is the maximum stream id in nodes. - maxID uint32 - - // lists of nodes that have been closed or are idle, but are kept in - // the tree for improved prioritization. When the lengths exceed either - // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode - - // From the config. - maxClosedNodesInTree int - maxIdleNodesInTree int - writeThrottleLimit int32 - enableWriteThrottle bool - - // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // The stream may be currently idle but cannot be opened or closed. - if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { - panic(fmt.Sprintf("stream %d already opened", streamID)) - } - curr.state = priorityNodeOpen - return - } - - // RFC 7540, Section 5.3.5: - // "All streams are initially assigned a non-exclusive dependency on stream 0x0. - // Pushed streams initially depend on their associated stream. In both cases, - // streams are assigned a default weight of 16." 
- parent := ws.nodes[options.PusherID] - if parent == nil { - parent = &ws.root - } - n := &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, - } - n.setParent(parent) - ws.nodes[streamID] = n - if streamID > ws.maxID { - ws.maxID = streamID - } -} - -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { - if streamID == 0 { - panic("violation of WriteScheduler interface: cannot close stream 0") - } - if ws.nodes[streamID] == nil { - panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) - } - if ws.nodes[streamID].state != priorityNodeOpen { - panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) - } - - n := ws.nodes[streamID] - n.state = priorityNodeClosed - n.addBytes(-n.bytes) - - q := n.q - ws.queuePool.put(&q) - n.q.s = nil - if ws.maxClosedNodesInTree > 0 { - ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) - } else { - ws.removeNode(n) - } -} - -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - if streamID == 0 { - panic("adjustPriority on root") - } - - // If streamID does not exist, there are two cases: - // - A closed stream that has been removed (this will have ID <= maxID) - // - An idle stream that is being used for "grouping" (this will have ID > maxID) - n := ws.nodes[streamID] - if n == nil { - if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { - return - } - ws.maxID = streamID - n = &priorityNode{ - q: *ws.queuePool.get(), - id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, - } - n.setParent(&ws.root) - ws.nodes[streamID] = n - ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) - } - - // Section 5.3.1: A dependency on a stream that is not currently in the tree - // results in that stream being given a default priority (Section 5.3.5). - parent := ws.nodes[priority.StreamDep] - if parent == nil { - n.setParent(&ws.root) - n.weight = priorityDefaultWeight - return - } - - // Ignore if the client tries to make a node its own parent. - if n == parent { - return - } - - // Section 5.3.3: - // "If a stream is made dependent on one of its own dependencies, the - // formerly dependent stream is first moved to be dependent on the - // reprioritized stream's previous parent. The moved dependency retains - // its weight." - // - // That is: if parent depends on n, move parent to depend on n.parent. - for x := parent.parent; x != nil; x = x.parent { - if x == n { - parent.setParent(n.parent) - break - } - } - - // Section 5.3.3: The exclusive flag causes the stream to become the sole - // dependency of its parent stream, causing other dependencies to become - // dependent on the exclusive stream. - if priority.Exclusive { - k := parent.kids - for k != nil { - next := k.next - if k != n { - k.setParent(n) - } - k = next - } - } - - n.setParent(parent) - n.weight = priority.Weight -} - -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode - if id := wr.StreamID(); id == 0 { - n = &ws.root - } else { - n = ws.nodes[id] - if n == nil { - // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. However, wr can be a RST_STREAM. In this case, we - // push wr onto the root, rather than creating a new priorityNode, - // since RST_STREAM is tiny and the stream's priority is unknown - // anyway. See issue #17919. 
- if wr.DataSize() > 0 { - panic("add DATA on non-open stream") - } - n = &ws.root - } - } - n.q.push(wr) -} - -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { - limit := int32(math.MaxInt32) - if openParent { - limit = ws.writeThrottleLimit - } - wr, ok = n.q.consume(limit) - if !ok { - return false - } - n.addBytes(int64(wr.DataSize())) - // If B depends on A and B continuously has data available but A - // does not, gradually increase the throttling limit to allow B to - // steal more and more bandwidth from A. - if openParent { - ws.writeThrottleLimit += 1024 - if ws.writeThrottleLimit < 0 { - ws.writeThrottleLimit = math.MaxInt32 - } - } else if ws.enableWriteThrottle { - ws.writeThrottleLimit = 1024 - } - return true - }) - return wr, ok -} - -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { - if maxSize == 0 { - return - } - if len(*list) == maxSize { - // Remove the oldest node, then shift left. - ws.removeNode((*list)[0]) - x := (*list)[1:] - copy(*list, x) - *list = (*list)[:len(x)] - } - *list = append(*list, n) -} - -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) - } - n.setParent(nil) - delete(ws.nodes, n.id) -} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go deleted file mode 100644 index 36d7919f1..000000000 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "math" - -// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 -// priorities. Control frames like SETTINGS and PING are written before DATA -// frames, but if no control frames are queued and multiple streams have queued -// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. -func NewRandomWriteScheduler() WriteScheduler { - return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} -} - -type randomWriteScheduler struct { - // zero are frames not associated with a specific stream. - zero writeQueue - - // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. - sq map[uint32]*writeQueue - - // pool of empty queues for reuse. - queuePool writeQueuePool -} - -func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { - // no-op: idle streams are not tracked -} - -func (ws *randomWriteScheduler) CloseStream(streamID uint32) { - q, ok := ws.sq[streamID] - if !ok { - return - } - delete(ws.sq, streamID) - ws.queuePool.put(q) -} - -func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { - // no-op: priorities are ignored -} - -func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - id := wr.StreamID() - if id == 0 { - ws.zero.push(wr) - return - } - q, ok := ws.sq[id] - if !ok { - q = ws.queuePool.get() - ws.sq[id] = q - } - q.push(wr) -} - -func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control frames first. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - // Iterate over all non-idle streams until finding one that can be consumed. 
- for _, q := range ws.sq { - if wr, ok := q.consume(math.MaxInt32); ok { - return wr, true - } - } - return FrameWriteRequest{}, false -} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go deleted file mode 100644 index 3daa8979e..000000000 --- a/vendor/golang.org/x/net/idna/idna.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idna implements IDNA2008 (Internationalized Domain Names for -// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and -// RFC 5894. -package idna // import "golang.org/x/net/idna" - -import ( - "strings" - "unicode/utf8" -) - -// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or -// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 - -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". -func ToASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". -func ToUnicode(s string) (string, error) { - if !strings.Contains(s, acePrefix) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if strings.HasPrefix(label, acePrefix) { - u, err := decode(label[len(acePrefix):]) - if err != nil { - return "", err - } - labels[i] = u - } - } - return strings.Join(labels, "."), nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 92e733f6a..000000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "fmt" - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -// decode decodes a string as specified in section 6.2. 
-func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. -func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. 
-func adapt(delta, numPoints int32, firstTime bool) int32 {
- if firstTime {
- delta /= damp
- } else {
- delta /= 2
- }
- delta += delta / numPoints
- k := int32(0)
- for delta > ((base-tmin)*tmax)/2 {
- delta /= base - tmin
- k += base
- }
- return k + (base-tmin+1)*delta/(delta+skew)
-}
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
deleted file mode 100644
index 20f2b8940..000000000
--- a/vendor/golang.org/x/net/lex/httplex/httplex.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package httplex contains rules around lexical matters of various
-// HTTP-related specifications.
-//
-// This package is shared by the standard library (which vendors it)
-// and x/net/http2. It comes with no API stability promise.
-package httplex
-
-import (
- "net"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/net/idna"
-)
-
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-func IsTokenRune(r rune) bool {
- i := int(r)
- return i < len(isTokenTable) && isTokenTable[i]
-}
-
-func isNotToken(r rune) bool {
- return !IsTokenRune(r)
-}
-
-// HeaderValuesContainsToken reports whether any string in values
-// contains the provided token, ASCII case-insensitively.
-func HeaderValuesContainsToken(values []string, token string) bool {
- for _, v := range values {
- if headerValueContainsToken(v, token) {
- return true
- }
- }
- return false
-}
-
-// isOWS reports whether b is an optional whitespace byte, as defined
-// by RFC 7230 section 3.2.3.
-func isOWS(b byte) bool { return b == ' ' || b == '\t' }
-
-// trimOWS returns x with all optional whitespace removed from the
-// beginning and end.
-func trimOWS(x string) string {
- // TODO: consider using strings.Trim(x, " \t") instead,
- // if and when it's fast enough. See issue 10292.
- // But this ASCII-only code will probably always beat UTF-8
- // aware code.
- for len(x) > 0 && isOWS(x[0]) {
- x = x[1:]
- }
- for len(x) > 0 && isOWS(x[len(x)-1]) {
- x = x[:len(x)-1]
- }
- return x
-}
-
-// headerValueContainsToken reports whether v (assumed to be a
-// 0#element, in the ABNF extension described in RFC 7230 section 7)
-// contains token amongst its comma-separated tokens, ASCII
-// case-insensitively.
-func headerValueContainsToken(v string, token string) bool {
- v = trimOWS(v)
- if comma := strings.IndexByte(v, ','); comma != -1 {
- return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
- }
- return tokenEqual(v, token)
-}
-
-// lowerASCII returns the ASCII lowercase version of b.
-func lowerASCII(b byte) byte {
- if 'A' <= b && b <= 'Z' {
- return b + ('a' - 'A')
- }
- return b
-}
-
-// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
-func tokenEqual(t1, t2 string) bool {
- if len(t1) != len(t2) {
- return false
- }
- for i, b := range t1 {
- if b >= utf8.RuneSelf {
- // No UTF-8 or non-ASCII allowed in tokens.
- return false
- }
- if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
- return false
- }
- }
- return true
-}
-
-// isLWS reports whether b is linear white space, according
-// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// LWS = [CRLF] 1*( SP | HT )
-func isLWS(b byte) bool { return b == ' ' || b == '\t' }
-
-// isCTL reports whether b is a control byte, according
-// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
-// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-func isCTL(b byte) bool {
- const del = 0x7f // a CTL
- return b < ' ' || b == del
-}
-
-// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
-// HTTP/2 imposes the additional restriction that uppercase ASCII
-// letters are not allowed.
-//
-// RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// token = 1*tchar
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
-func ValidHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if !IsTokenRune(r) {
- return false
- }
- }
- return true
-}
-
-// ValidHostHeader reports whether h is a valid host header.
-func ValidHostHeader(h string) bool {
- // The latest spec is actually this:
- //
- // http://tools.ietf.org/html/rfc7230#section-5.4
- // Host = uri-host [ ":" port ]
- //
- // Where uri-host is:
- // http://tools.ietf.org/html/rfc3986#section-3.2.2
- //
- // But we're going to be much more lenient for now and just
- // search for any byte that's not a valid byte in any of those
- // expressions.
- for i := 0; i < len(h); i++ {
- if !validHostByte[h[i]] {
- return false
- }
- }
- return true
-}
-
-// See the validHostHeader comment.
-var validHostByte = [256]bool{
- '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
- '8': true, '9': true,
-
- 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
- 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
- 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
- 'y': true, 'z': true,
-
- 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
- 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
- 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
- 'Y': true, 'Z': true,
-
- '!': true, // sub-delims
- '$': true, // sub-delims
- '%': true, // pct-encoded (and used in IPv6 zones)
- '&': true, // sub-delims
- '(': true, // sub-delims
- ')': true, // sub-delims
- '*': true, // sub-delims
- '+': true, // sub-delims
- ',': true, // sub-delims
- '-': true, // unreserved
- '.': true, // unreserved
- ':': true, // IPv6address + Host expression's optional port
- ';': true, // sub-delims
- '=': true, // sub-delims
- '[': true,
- '\'': true, // sub-delims
- ']': true,
- '_': true, // unreserved
- '~': true, // unreserved
-}
-
-// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
-//
-// message-header = field-name ":" [ field-value ]
-// field-value = *( field-content | LWS )
-// field-content = <the OCTETs making up the field-value and consisting of
-// either *TEXT or combinations of token, separators, and quoted-string>
-//
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
-//
-// TEXT = <any OCTET except CTLs, but including LWS>
-// LWS = [CRLF] 1*( SP | HT )
-// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-//
-// RFC 7230 says:
-// field-value = *( field-content / obs-fold )
-// obj-fold = N/A to http2, and deprecated
-// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-// field-vchar = VCHAR / obs-text
-// obs-text = %x80-FF
-// VCHAR = "any visible [USASCII] character"
-//
-// http2 further says: "Similarly, HTTP/2 allows header field values
-// that are not valid. While most of the values that can be encoded
-// will not alter header field parsing, carriage return (CR, ASCII
-// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
-// 0x0) might be exploited by an attacker if they are translated
-// verbatim. Any request or response that contains a character not
-// permitted in a header field value MUST be treated as malformed
-// (Section 8.1.2.6). Valid characters are defined by the
-// field-content ABNF rule in Section 3.2 of [RFC7230]."
-//
-// This function does not (yet?) properly handle the rejection of
-// strings that begin or end with SP or HTAB.
-func ValidHeaderFieldValue(v string) bool {
- for i := 0; i < len(v); i++ {
- b := v[i]
- if isCTL(b) && !isLWS(b) {
- return false
- }
- }
- return true
-}
-
-func isASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- if s[i] >= utf8.RuneSelf {
- return false
- }
- }
- return true
-}
-
-// PunycodeHostPort returns the IDNA Punycode version
-// of the provided "host" or "host:port" string.
-func PunycodeHostPort(v string) (string, error) {
- if isASCII(v) {
- return v, nil
- }
-
- host, port, err := net.SplitHostPort(v)
- if err != nil {
- // The input 'v' argument was just a "host" argument,
- // without a port. This error should not be returned
- // to the caller.
- host = v
- port = ""
- }
- host, err = idna.ToASCII(host)
- if err != nil {
- // Non-UTF-8? Not representable in Punycode, in any
- // case.
- return "", err - } - if port == "" { - return host, nil - } - return net.JoinHostPort(host, port), nil -} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go deleted file mode 100644 index 20d1e1e38..000000000 --- a/vendor/golang.org/x/net/websocket/client.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "crypto/tls" - "io" - "net" - "net/http" - "net/url" -) - -// DialError is an error that occurs while dialling a websocket server. -type DialError struct { - *Config - Err error -} - -func (e *DialError) Error() string { - return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() -} - -// NewConfig creates a new WebSocket config for client connection. -func NewConfig(server, origin string) (config *Config, err error) { - config = new(Config) - config.Version = ProtocolVersionHybi13 - config.Location, err = url.ParseRequestURI(server) - if err != nil { - return - } - config.Origin, err = url.ParseRequestURI(origin) - if err != nil { - return - } - config.Header = http.Header(make(map[string][]string)) - return -} - -// NewClient creates a new WebSocket client connection over rwc. -func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { - br := bufio.NewReader(rwc) - bw := bufio.NewWriter(rwc) - err = hybiClientHandshake(config, br, bw) - if err != nil { - return - } - buf := bufio.NewReadWriter(br, bw) - ws = newHybiClientConn(config, buf, rwc) - return -} - -// Dial opens a new client connection to a WebSocket. -func Dial(url_, protocol, origin string) (ws *Conn, err error) { - config, err := NewConfig(url_, origin) - if err != nil { - return nil, err - } - if protocol != "" { - config.Protocol = []string{protocol} - } - return DialConfig(config) -} - -var portMap = map[string]string{ - "ws": "80", - "wss": "443", -} - -func parseAuthority(location *url.URL) string { - if _, ok := portMap[location.Scheme]; ok { - if _, _, err := net.SplitHostPort(location.Host); err != nil { - return net.JoinHostPort(location.Host, portMap[location.Scheme]) - } - } - return location.Host -} - -// DialConfig opens a new client connection to a WebSocket with a config. -func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn - if config.Location == nil { - return nil, &DialError{config, ErrBadWebSocketLocation} - } - if config.Origin == nil { - return nil, &DialError{config, ErrBadWebSocketOrigin} - } - switch config.Location.Scheme { - case "ws": - client, err = net.Dial("tcp", parseAuthority(config.Location)) - - case "wss": - client, err = tls.Dial("tcp", parseAuthority(config.Location), config.TlsConfig) - - default: - err = ErrBadScheme - } - if err != nil { - goto Error - } - - ws, err = NewClient(config, client) - if err != nil { - client.Close() - goto Error - } - return - -Error: - return nil, &DialError{config, err} -} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go deleted file mode 100644 index 60bbc8418..000000000 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -// This file implements a protocol of hybi draft. 
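For reference, typical usage of the client API deleted above: Dial builds a Config and runs the hybi opening handshake implemented in this file. The echo endpoint at ws://localhost:8080/echo is an assumption for illustration, not part of this patch:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/websocket"
    )

    func main() {
        // Dial runs the opening handshake; this package requires an
        // origin even for non-browser clients.
        ws, err := websocket.Dial("ws://localhost:8080/echo", "", "http://localhost/")
        if err != nil {
            log.Fatal(err)
        }
        defer ws.Close()

        if _, err := ws.Write([]byte("hello")); err != nil {
            log.Fatal(err)
        }
        buf := make([]byte, 512)
        n, err := ws.Read(buf)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("received: %s\n", buf[:n])
    }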
-// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" -) - -const ( - websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" - - closeStatusNormal = 1000 - closeStatusGoingAway = 1001 - closeStatusProtocolError = 1002 - closeStatusUnsupportedData = 1003 - closeStatusFrameTooLarge = 1004 - closeStatusNoStatusRcvd = 1005 - closeStatusAbnormalClosure = 1006 - closeStatusBadMessageData = 1007 - closeStatusPolicyViolation = 1008 - closeStatusTooBigData = 1009 - closeStatusExtensionMismatch = 1010 - - maxControlFramePayloadLength = 125 -) - -var ( - ErrBadMaskingKey = &ProtocolError{"bad masking key"} - ErrBadPongMessage = &ProtocolError{"bad pong message"} - ErrBadClosingStatus = &ProtocolError{"bad closing status"} - ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} - ErrNotImplemented = &ProtocolError{"not implemented"} - - handshakeHeader = map[string]bool{ - "Host": true, - "Upgrade": true, - "Connection": true, - "Sec-Websocket-Key": true, - "Sec-Websocket-Origin": true, - "Sec-Websocket-Version": true, - "Sec-Websocket-Protocol": true, - "Sec-Websocket-Accept": true, - } -) - -// A hybiFrameHeader is a frame header as defined in hybi draft. -type hybiFrameHeader struct { - Fin bool - Rsv [3]bool - OpCode byte - Length int64 - MaskingKey []byte - - data *bytes.Buffer -} - -// A hybiFrameReader is a reader for hybi frame. -type hybiFrameReader struct { - reader io.Reader - - header hybiFrameHeader - pos int64 - length int -} - -func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { - n, err = frame.reader.Read(msg) - if err != nil { - return 0, err - } - if frame.header.MaskingKey != nil { - for i := 0; i < n; i++ { - msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] - frame.pos++ - } - } - return n, err -} - -func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } - -func (frame *hybiFrameReader) HeaderReader() io.Reader { - if frame.header.data == nil { - return nil - } - if frame.header.data.Len() == 0 { - return nil - } - return frame.header.data -} - -func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } - -func (frame *hybiFrameReader) Len() (n int) { return frame.length } - -// A hybiFrameReaderFactory creates new frame reader based on its frame type. -type hybiFrameReaderFactory struct { - *bufio.Reader -} - -// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. -// See Section 5.2 Base Framing protocol for detail. -// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 -func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { - hybiFrame := new(hybiFrameReader) - frame = hybiFrame - var header []byte - var b byte - // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) - b, err = buf.ReadByte() - if err != nil { - return - } - header = append(header, b) - hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 - for i := 0; i < 3; i++ { - j := uint(6 - i) - hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 - } - hybiFrame.header.OpCode = header[0] & 0x0f - - // Second byte. Mask/Payload len(7bits) - b, err = buf.ReadByte() - if err != nil { - return - } - header = append(header, b) - mask := (b & 0x80) != 0 - b &= 0x7f - lengthFields := 0 - switch { - case b <= 125: // Payload length 7bits. 
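The first two header bytes parsed above pack FIN/RSV/opcode and MASK/length. A tiny sketch decoding the RFC 6455 example bytes for a masked text frame with a 5-byte payload:

    package main

    import "fmt"

    func main() {
        // First two header bytes of a masked text frame carrying a
        // 5-byte payload: FIN=1, opcode=1 (text); MASK=1, length=5.
        b0, b1 := byte(0x81), byte(0x85)

        fin := b0&0x80 != 0
        opcode := b0 & 0x0f
        masked := b1&0x80 != 0
        length := int(b1 & 0x7f) // 126 or 127 would signal 16- or 64-bit extended lengths

        fmt.Println(fin, opcode, masked, length) // true 1 true 5
    }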
- hybiFrame.header.Length = int64(b) - case b == 126: // Payload length 7+16bits - lengthFields = 2 - case b == 127: // Payload length 7+64bits - lengthFields = 8 - } - for i := 0; i < lengthFields; i++ { - b, err = buf.ReadByte() - if err != nil { - return - } - if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits - b &= 0x7f - } - header = append(header, b) - hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) - } - if mask { - // Masking key. 4 bytes. - for i := 0; i < 4; i++ { - b, err = buf.ReadByte() - if err != nil { - return - } - header = append(header, b) - hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) - } - } - hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) - hybiFrame.header.data = bytes.NewBuffer(header) - hybiFrame.length = len(header) + int(hybiFrame.header.Length) - return -} - -// A HybiFrameWriter is a writer for hybi frame. -type hybiFrameWriter struct { - writer *bufio.Writer - - header *hybiFrameHeader -} - -func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { - var header []byte - var b byte - if frame.header.Fin { - b |= 0x80 - } - for i := 0; i < 3; i++ { - if frame.header.Rsv[i] { - j := uint(6 - i) - b |= 1 << j - } - } - b |= frame.header.OpCode - header = append(header, b) - if frame.header.MaskingKey != nil { - b = 0x80 - } else { - b = 0 - } - lengthFields := 0 - length := len(msg) - switch { - case length <= 125: - b |= byte(length) - case length < 65536: - b |= 126 - lengthFields = 2 - default: - b |= 127 - lengthFields = 8 - } - header = append(header, b) - for i := 0; i < lengthFields; i++ { - j := uint((lengthFields - i - 1) * 8) - b = byte((length >> j) & 0xff) - header = append(header, b) - } - if frame.header.MaskingKey != nil { - if len(frame.header.MaskingKey) != 4 { - return 0, ErrBadMaskingKey - } - header = append(header, frame.header.MaskingKey...) - frame.writer.Write(header) - data := make([]byte, length) - for i := range data { - data[i] = msg[i] ^ frame.header.MaskingKey[i%4] - } - frame.writer.Write(data) - err = frame.writer.Flush() - return length, err - } - frame.writer.Write(header) - frame.writer.Write(msg) - err = frame.writer.Flush() - return length, err -} - -func (frame *hybiFrameWriter) Close() error { return nil } - -type hybiFrameWriterFactory struct { - *bufio.Writer - needMaskingKey bool -} - -func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { - frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} - if buf.needMaskingKey { - frameHeader.MaskingKey, err = generateMaskingKey() - if err != nil { - return nil, err - } - } - return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil -} - -type hybiFrameHandler struct { - conn *Conn - payloadType byte -} - -func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { - if handler.conn.IsServerConn() { - // The client MUST mask all frames sent to the server. - if frame.(*hybiFrameReader).header.MaskingKey == nil { - handler.WriteClose(closeStatusProtocolError) - return nil, io.EOF - } - } else { - // The server MUST NOT mask all frames. 
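Masking, as applied byte-by-byte in hybiFrameReader.Read and hybiFrameWriter.Write above, is a self-inverse XOR against a repeating 4-byte key. A minimal sketch using the example masking key from RFC 6455:

    package main

    import "fmt"

    // mask XORs payload in place with the 4-byte masking key, the same
    // transform hybiFrameReader.Read applies when unmasking. XOR with
    // the key is self-inverse, so masking twice restores the input.
    func mask(payload []byte, key [4]byte) {
        for i := range payload {
            payload[i] ^= key[i%4]
        }
    }

    func main() {
        key := [4]byte{0x37, 0xfa, 0x21, 0x3d}
        msg := []byte("Hello")
        mask(msg, key)
        fmt.Printf("masked:   % x\n", msg)
        mask(msg, key)
        fmt.Printf("restored: %s\n", msg) // Hello
    }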
- if frame.(*hybiFrameReader).header.MaskingKey != nil { - handler.WriteClose(closeStatusProtocolError) - return nil, io.EOF - } - } - if header := frame.HeaderReader(); header != nil { - io.Copy(ioutil.Discard, header) - } - switch frame.PayloadType() { - case ContinuationFrame: - frame.(*hybiFrameReader).header.OpCode = handler.payloadType - case TextFrame, BinaryFrame: - handler.payloadType = frame.PayloadType() - case CloseFrame: - return nil, io.EOF - case PingFrame, PongFrame: - b := make([]byte, maxControlFramePayloadLength) - n, err := io.ReadFull(frame, b) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return nil, err - } - io.Copy(ioutil.Discard, frame) - if frame.PayloadType() == PingFrame { - if _, err := handler.WritePong(b[:n]); err != nil { - return nil, err - } - } - return nil, nil - } - return frame, nil -} - -func (handler *hybiFrameHandler) WriteClose(status int) (err error) { - handler.conn.wio.Lock() - defer handler.conn.wio.Unlock() - w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) - if err != nil { - return err - } - msg := make([]byte, 2) - binary.BigEndian.PutUint16(msg, uint16(status)) - _, err = w.Write(msg) - w.Close() - return err -} - -func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { - handler.conn.wio.Lock() - defer handler.conn.wio.Unlock() - w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) - if err != nil { - return 0, err - } - n, err = w.Write(msg) - w.Close() - return n, err -} - -// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. -func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { - if buf == nil { - br := bufio.NewReader(rwc) - bw := bufio.NewWriter(rwc) - buf = bufio.NewReadWriter(br, bw) - } - ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, - frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, - frameWriterFactory: hybiFrameWriterFactory{ - buf.Writer, request == nil}, - PayloadType: TextFrame, - defaultCloseStatus: closeStatusNormal} - ws.frameHandler = &hybiFrameHandler{conn: ws} - return ws -} - -// generateMaskingKey generates a masking key for a frame. -func generateMaskingKey() (maskingKey []byte, err error) { - maskingKey = make([]byte, 4) - if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { - return - } - return -} - -// generateNonce generates a nonce consisting of a randomly selected 16-byte -// value that has been base64-encoded. -func generateNonce() (nonce []byte) { - key := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, key); err != nil { - panic(err) - } - nonce = make([]byte, 24) - base64.StdEncoding.Encode(nonce, key) - return -} - -// removeZone removes IPv6 zone identifer from host. -// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" -func removeZone(host string) string { - if !strings.HasPrefix(host, "[") { - return host - } - i := strings.LastIndex(host, "]") - if i < 0 { - return host - } - j := strings.LastIndex(host[:i], "%") - if j < 0 { - return host - } - return host[:j] + host[i:] -} - -// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of -// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
-func getNonceAccept(nonce []byte) (expected []byte, err error) { - h := sha1.New() - if _, err = h.Write(nonce); err != nil { - return - } - if _, err = h.Write([]byte(websocketGUID)); err != nil { - return - } - expected = make([]byte, 28) - base64.StdEncoding.Encode(expected, h.Sum(nil)) - return -} - -// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 -func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { - bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") - - // According to RFC 6874, an HTTP client, proxy, or other - // intermediary must remove any IPv6 zone identifier attached - // to an outgoing URI. - bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") - bw.WriteString("Upgrade: websocket\r\n") - bw.WriteString("Connection: Upgrade\r\n") - nonce := generateNonce() - if config.handshakeData != nil { - nonce = []byte(config.handshakeData["key"]) - } - bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") - bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") - - if config.Version != ProtocolVersionHybi13 { - return ErrBadProtocolVersion - } - - bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") - if len(config.Protocol) > 0 { - bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") - } - // TODO(ukai): send Sec-WebSocket-Extensions. - err = config.Header.WriteSubset(bw, handshakeHeader) - if err != nil { - return err - } - - bw.WriteString("\r\n") - if err = bw.Flush(); err != nil { - return err - } - - resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) - if err != nil { - return err - } - if resp.StatusCode != 101 { - return ErrBadStatus - } - if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || - strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { - return ErrBadUpgrade - } - expectedAccept, err := getNonceAccept(nonce) - if err != nil { - return err - } - if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { - return ErrChallengeResponse - } - if resp.Header.Get("Sec-WebSocket-Extensions") != "" { - return ErrUnsupportedExtensions - } - offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") - if offeredProtocol != "" { - protocolMatched := false - for i := 0; i < len(config.Protocol); i++ { - if config.Protocol[i] == offeredProtocol { - protocolMatched = true - break - } - } - if !protocolMatched { - return ErrBadWebSocketProtocol - } - config.Protocol = []string{offeredProtocol} - } - - return nil -} - -// newHybiClientConn creates a client WebSocket connection after handshake. -func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { - return newHybiConn(config, buf, rwc, nil) -} - -// A HybiServerHandshaker performs a server handshake using hybi draft protocol. -type hybiServerHandshaker struct { - *Config - accept []byte -} - -func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { - c.Version = ProtocolVersionHybi13 - if req.Method != "GET" { - return http.StatusMethodNotAllowed, ErrBadRequestMethod - } - // HTTP version can be safely ignored. 
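getNonceAccept above implements the RFC 6455 challenge: Sec-WebSocket-Accept is base64(SHA-1(nonce + GUID)). A standalone check against the example nonce from the RFC's sample handshake:

    package main

    import (
        "crypto/sha1"
        "encoding/base64"
        "fmt"
    )

    const websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

    func main() {
        // Compute the accept value for the RFC 6455 example nonce,
        // exactly as getNonceAccept does.
        nonce := "dGhlIHNhbXBsZSBub25jZQ=="
        h := sha1.New()
        h.Write([]byte(nonce + websocketGUID))
        fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
        // s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
    }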
- - if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || - !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { - return http.StatusBadRequest, ErrNotWebSocket - } - - key := req.Header.Get("Sec-Websocket-Key") - if key == "" { - return http.StatusBadRequest, ErrChallengeResponse - } - version := req.Header.Get("Sec-Websocket-Version") - switch version { - case "13": - c.Version = ProtocolVersionHybi13 - default: - return http.StatusBadRequest, ErrBadWebSocketVersion - } - var scheme string - if req.TLS != nil { - scheme = "wss" - } else { - scheme = "ws" - } - c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) - if err != nil { - return http.StatusBadRequest, err - } - protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) - if protocol != "" { - protocols := strings.Split(protocol, ",") - for i := 0; i < len(protocols); i++ { - c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) - } - } - c.accept, err = getNonceAccept([]byte(key)) - if err != nil { - return http.StatusInternalServerError, err - } - return http.StatusSwitchingProtocols, nil -} - -// Origin parses the Origin header in req. -// If the Origin header is not set, it returns nil and nil. -func Origin(config *Config, req *http.Request) (*url.URL, error) { - var origin string - switch config.Version { - case ProtocolVersionHybi13: - origin = req.Header.Get("Origin") - } - if origin == "" { - return nil, nil - } - return url.ParseRequestURI(origin) -} - -func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { - if len(c.Protocol) > 0 { - if len(c.Protocol) != 1 { - // You need choose a Protocol in Handshake func in Server. - return ErrBadWebSocketProtocol - } - } - buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") - buf.WriteString("Upgrade: websocket\r\n") - buf.WriteString("Connection: Upgrade\r\n") - buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") - if len(c.Protocol) > 0 { - buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") - } - // TODO(ukai): send Sec-WebSocket-Extensions. - if c.Header != nil { - err := c.Header.WriteSubset(buf, handshakeHeader) - if err != nil { - return err - } - } - buf.WriteString("\r\n") - return buf.Flush() -} - -func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { - return newHybiServerConn(c.Config, buf, rwc, request) -} - -// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. -func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { - return newHybiConn(config, buf, rwc, request) -} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go deleted file mode 100644 index 0895dea19..000000000 --- a/vendor/golang.org/x/net/websocket/server.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "fmt" - "io" - "net/http" -) - -func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { - var hs serverHandshaker = &hybiServerHandshaker{Config: config} - code, err := hs.ReadHandshake(buf.Reader, req) - if err == ErrBadWebSocketVersion { - fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) - fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) - buf.WriteString("\r\n") - buf.WriteString(err.Error()) - buf.Flush() - return - } - if err != nil { - fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) - buf.WriteString("\r\n") - buf.WriteString(err.Error()) - buf.Flush() - return - } - if handshake != nil { - err = handshake(config, req) - if err != nil { - code = http.StatusForbidden - fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) - buf.WriteString("\r\n") - buf.Flush() - return - } - } - err = hs.AcceptHandshake(buf.Writer) - if err != nil { - code = http.StatusBadRequest - fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) - buf.WriteString("\r\n") - buf.Flush() - return - } - conn = hs.NewServerConn(buf, rwc, req) - return -} - -// Server represents a server of a WebSocket. -type Server struct { - // Config is a WebSocket configuration for new WebSocket connection. - Config - - // Handshake is an optional function in WebSocket handshake. - // For example, you can check, or don't check Origin header. - // Another example, you can select config.Protocol. - Handshake func(*Config, *http.Request) error - - // Handler handles a WebSocket connection. - Handler -} - -// ServeHTTP implements the http.Handler interface for a WebSocket -func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - s.serveWebSocket(w, req) -} - -func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { - rwc, buf, err := w.(http.Hijacker).Hijack() - if err != nil { - panic("Hijack failed: " + err.Error()) - } - // The server should abort the WebSocket connection if it finds - // the client did not send a handshake that matches with protocol - // specification. - defer rwc.Close() - conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) - if err != nil { - return - } - if conn == nil { - panic("unexpected nil conn") - } - s.Handler(conn) -} - -// Handler is a simple interface to a WebSocket browser client. -// It checks if Origin header is valid URL by default. -// You might want to verify websocket.Conn.Config().Origin in the func. -// If you use Server instead of Handler, you could call websocket.Origin and -// check the origin in your Handshake func. So, if you want to accept -// non-browser clients, which do not send an Origin header, set a -// Server.Handshake that does not check the origin. 
-type Handler func(*Conn) - -func checkOrigin(config *Config, req *http.Request) (err error) { - config.Origin, err = Origin(config, req) - if err == nil && config.Origin == nil { - return fmt.Errorf("null origin") - } - return err -} - -// ServeHTTP implements the http.Handler interface for a WebSocket -func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - s := Server{Handler: h, Handshake: checkOrigin} - s.serveWebSocket(w, req) -} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go deleted file mode 100644 index 606840098..000000000 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements a client and server for the WebSocket protocol -// as specified in RFC 6455. -package websocket // import "golang.org/x/net/websocket" - -import ( - "bufio" - "crypto/tls" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "sync" - "time" -) - -const ( - ProtocolVersionHybi13 = 13 - ProtocolVersionHybi = ProtocolVersionHybi13 - SupportedProtocolVersion = "13" - - ContinuationFrame = 0 - TextFrame = 1 - BinaryFrame = 2 - CloseFrame = 8 - PingFrame = 9 - PongFrame = 10 - UnknownFrame = 255 -) - -// ProtocolError represents WebSocket protocol errors. -type ProtocolError struct { - ErrorString string -} - -func (err *ProtocolError) Error() string { return err.ErrorString } - -var ( - ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} - ErrBadScheme = &ProtocolError{"bad scheme"} - ErrBadStatus = &ProtocolError{"bad status"} - ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} - ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} - ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} - ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} - ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} - ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} - ErrBadFrame = &ProtocolError{"bad frame"} - ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} - ErrNotWebSocket = &ProtocolError{"not websocket protocol"} - ErrBadRequestMethod = &ProtocolError{"bad method"} - ErrNotSupported = &ProtocolError{"not supported"} -) - -// Addr is an implementation of net.Addr for WebSocket. -type Addr struct { - *url.URL -} - -// Network returns the network type for a WebSocket, "websocket". -func (addr *Addr) Network() string { return "websocket" } - -// Config is a WebSocket configuration -type Config struct { - // A WebSocket server address. - Location *url.URL - - // A Websocket client origin. - Origin *url.URL - - // WebSocket subprotocols. - Protocol []string - - // WebSocket protocol version. - Version int - - // TLS config for secure WebSocket (wss). - TlsConfig *tls.Config - - // Additional header fields to be sent in WebSocket opening handshake. - Header http.Header - - handshakeData map[string]string -} - -// serverHandshaker is an interface to handle WebSocket server side handshake. -type serverHandshaker interface { - // ReadHandshake reads handshake request message from client. - // Returns http response code and error if any. 
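For reference, typical usage of the Handler/Server API deleted above; the port and path are illustrative:

    package main

    import (
        "io"
        "log"
        "net/http"

        "golang.org/x/net/websocket"
    )

    // echo copies every message straight back to the client.
    func echo(ws *websocket.Conn) {
        io.Copy(ws, ws)
    }

    func main() {
        // Handler validates the Origin header via checkOrigin; use
        // Server with a custom Handshake func to accept clients that
        // omit it.
        http.Handle("/echo", websocket.Handler(echo))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }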
- ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) - - // AcceptHandshake accepts the client handshake request and sends - // handshake response back to client. - AcceptHandshake(buf *bufio.Writer) (err error) - - // NewServerConn creates a new WebSocket connection. - NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) -} - -// frameReader is an interface to read a WebSocket frame. -type frameReader interface { - // Reader is to read payload of the frame. - io.Reader - - // PayloadType returns payload type. - PayloadType() byte - - // HeaderReader returns a reader to read header of the frame. - HeaderReader() io.Reader - - // TrailerReader returns a reader to read trailer of the frame. - // If it returns nil, there is no trailer in the frame. - TrailerReader() io.Reader - - // Len returns total length of the frame, including header and trailer. - Len() int -} - -// frameReaderFactory is an interface to creates new frame reader. -type frameReaderFactory interface { - NewFrameReader() (r frameReader, err error) -} - -// frameWriter is an interface to write a WebSocket frame. -type frameWriter interface { - // Writer is to write payload of the frame. - io.WriteCloser -} - -// frameWriterFactory is an interface to create new frame writer. -type frameWriterFactory interface { - NewFrameWriter(payloadType byte) (w frameWriter, err error) -} - -type frameHandler interface { - HandleFrame(frame frameReader) (r frameReader, err error) - WriteClose(status int) (err error) -} - -// Conn represents a WebSocket connection. -type Conn struct { - config *Config - request *http.Request - - buf *bufio.ReadWriter - rwc io.ReadWriteCloser - - rio sync.Mutex - frameReaderFactory - frameReader - - wio sync.Mutex - frameWriterFactory - - frameHandler - PayloadType byte - defaultCloseStatus int -} - -// Read implements the io.Reader interface: -// it reads data of a frame from the WebSocket connection. -// if msg is not large enough for the frame data, it fills the msg and next Read -// will read the rest of the frame data. -// it reads Text frame or Binary frame. -func (ws *Conn) Read(msg []byte) (n int, err error) { - ws.rio.Lock() - defer ws.rio.Unlock() -again: - if ws.frameReader == nil { - frame, err := ws.frameReaderFactory.NewFrameReader() - if err != nil { - return 0, err - } - ws.frameReader, err = ws.frameHandler.HandleFrame(frame) - if err != nil { - return 0, err - } - if ws.frameReader == nil { - goto again - } - } - n, err = ws.frameReader.Read(msg) - if err == io.EOF { - if trailer := ws.frameReader.TrailerReader(); trailer != nil { - io.Copy(ioutil.Discard, trailer) - } - ws.frameReader = nil - goto again - } - return n, err -} - -// Write implements the io.Writer interface: -// it writes data as a frame to the WebSocket connection. -func (ws *Conn) Write(msg []byte) (n int, err error) { - ws.wio.Lock() - defer ws.wio.Unlock() - w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) - if err != nil { - return 0, err - } - n, err = w.Write(msg) - w.Close() - if err != nil { - return n, err - } - return n, err -} - -// Close implements the io.Closer interface. 
-func (ws *Conn) Close() error { - err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) - err1 := ws.rwc.Close() - if err != nil { - return err - } - return err1 -} - -func (ws *Conn) IsClientConn() bool { return ws.request == nil } -func (ws *Conn) IsServerConn() bool { return ws.request != nil } - -// LocalAddr returns the WebSocket Origin for the connection for client, or -// the WebSocket location for server. -func (ws *Conn) LocalAddr() net.Addr { - if ws.IsClientConn() { - return &Addr{ws.config.Origin} - } - return &Addr{ws.config.Location} -} - -// RemoteAddr returns the WebSocket location for the connection for client, or -// the Websocket Origin for server. -func (ws *Conn) RemoteAddr() net.Addr { - if ws.IsClientConn() { - return &Addr{ws.config.Location} - } - return &Addr{ws.config.Origin} -} - -var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") - -// SetDeadline sets the connection's network read & write deadlines. -func (ws *Conn) SetDeadline(t time.Time) error { - if conn, ok := ws.rwc.(net.Conn); ok { - return conn.SetDeadline(t) - } - return errSetDeadline -} - -// SetReadDeadline sets the connection's network read deadline. -func (ws *Conn) SetReadDeadline(t time.Time) error { - if conn, ok := ws.rwc.(net.Conn); ok { - return conn.SetReadDeadline(t) - } - return errSetDeadline -} - -// SetWriteDeadline sets the connection's network write deadline. -func (ws *Conn) SetWriteDeadline(t time.Time) error { - if conn, ok := ws.rwc.(net.Conn); ok { - return conn.SetWriteDeadline(t) - } - return errSetDeadline -} - -// Config returns the WebSocket config. -func (ws *Conn) Config() *Config { return ws.config } - -// Request returns the http request upgraded to the WebSocket. -// It is nil for client side. -func (ws *Conn) Request() *http.Request { return ws.request } - -// Codec represents a symmetric pair of functions that implement a codec. -type Codec struct { - Marshal func(v interface{}) (data []byte, payloadType byte, err error) - Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) -} - -// Send sends v marshaled by cd.Marshal as single frame to ws. -func (cd Codec) Send(ws *Conn, v interface{}) (err error) { - data, payloadType, err := cd.Marshal(v) - if err != nil { - return err - } - ws.wio.Lock() - defer ws.wio.Unlock() - w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) - if err != nil { - return err - } - _, err = w.Write(data) - w.Close() - return err -} - -// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v. 
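Since the deadline setters above only succeed when the transport is a net.Conn, a read timeout must be armed before each receive. A sketch of a helper built on the Message codec defined just below; receiveWithTimeout is an illustrative name, not part of the package:

    package wsutil

    import (
        "time"

        "golang.org/x/net/websocket"
    )

    // receiveWithTimeout reads one text message, failing if no frame
    // arrives within d. SetReadDeadline returns an error when the
    // connection is not backed by a net.Conn, per the code above.
    func receiveWithTimeout(ws *websocket.Conn, d time.Duration) (string, error) {
        if err := ws.SetReadDeadline(time.Now().Add(d)); err != nil {
            return "", err
        }
        var msg string
        err := websocket.Message.Receive(ws, &msg)
        return msg, err
    }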
-func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { - ws.rio.Lock() - defer ws.rio.Unlock() - if ws.frameReader != nil { - _, err = io.Copy(ioutil.Discard, ws.frameReader) - if err != nil { - return err - } - ws.frameReader = nil - } -again: - frame, err := ws.frameReaderFactory.NewFrameReader() - if err != nil { - return err - } - frame, err = ws.frameHandler.HandleFrame(frame) - if err != nil { - return err - } - if frame == nil { - goto again - } - payloadType := frame.PayloadType() - data, err := ioutil.ReadAll(frame) - if err != nil { - return err - } - return cd.Unmarshal(data, payloadType, v) -} - -func marshal(v interface{}) (msg []byte, payloadType byte, err error) { - switch data := v.(type) { - case string: - return []byte(data), TextFrame, nil - case []byte: - return data, BinaryFrame, nil - } - return nil, UnknownFrame, ErrNotSupported -} - -func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { - switch data := v.(type) { - case *string: - *data = string(msg) - return nil - case *[]byte: - *data = msg - return nil - } - return ErrNotSupported -} - -/* -Message is a codec to send/receive text/binary data in a frame on WebSocket connection. -To send/receive text frame, use string type. -To send/receive binary frame, use []byte type. - -Trivial usage: - - import "websocket" - - // receive text frame - var message string - websocket.Message.Receive(ws, &message) - - // send text frame - message = "hello" - websocket.Message.Send(ws, message) - - // receive binary frame - var data []byte - websocket.Message.Receive(ws, &data) - - // send binary frame - data = []byte{0, 1, 2} - websocket.Message.Send(ws, data) - -*/ -var Message = Codec{marshal, unmarshal} - -func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { - msg, err = json.Marshal(v) - return msg, TextFrame, err -} - -func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { - return json.Unmarshal(msg, v) -} - -/* -JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. - -Trivial usage: - - import "websocket" - - type T struct { - Msg string - Count int - } - - // receive JSON type T - var data T - websocket.JSON.Receive(ws, &data) - - // send JSON type T - websocket.JSON.Send(ws, data) -*/ -var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go deleted file mode 100644 index 9a4f8d59e..000000000 --- a/vendor/golang.org/x/sync/singleflight/singleflight.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package singleflight provides a duplicate function call suppression -// mechanism. -package singleflight // import "golang.org/x/sync/singleflight" - -import "sync" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. 
-type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - delete(g.m, key) - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - g.mu.Unlock() -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. 
-func (g *Group) Forget(key string) { - g.mu.Lock() - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index dec0970af..000000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - {"path":"github.com/armon/go-proxyproto","checksumSHA1":"eAall4ACaMG40mzSJ5Oc95GiF1A=","revision":"609d6338d3a76ec26ac3fe7045a164d9a58436e7","revisionTime":"2015-02-06T18:58:55-08:00"}, - {"path":"github.com/circonus-labs/circonus-gometrics","checksumSHA1":"ZAdLZ0e/hin4AXxdS9F8y0yi/bg=","revision":"f8c68ed96a065c10344dcaf802f608781fc0a981","revisionTime":"2016-08-30T16:47:25Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics/api","checksumSHA1":"2hV0W0wM75u+OJ4kGv0i7B95cmY=","revision":"f8c68ed96a065c10344dcaf802f608781fc0a981","revisionTime":"2016-08-30T16:47:25Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"r3XLF0s89Y7UJgS9YP3NbdyafNY=","revision":"f8c68ed96a065c10344dcaf802f608781fc0a981","revisionTime":"2016-08-30T16:47:25Z"}, - {"path":"github.com/circonus-labs/circonusllhist","checksumSHA1":"C4Z7+l5GOpOCW5DcvNYzheGvQRE=","revision":"d724266ae5270ae8b87a5d2e8081f04e307c3c18","revisionTime":"2016-05-26T04:38:13Z"}, - {"path":"github.com/cyberdelia/go-metrics-graphite","checksumSHA1":"8/Q1JbAHUmL4sDURLq6yron4K/I=","revision":"b8345b7f01d571b05366d5791a034d872f1bb36f","revisionTime":"2015-08-25T20:22:00-07:00"}, - {"path":"github.com/fatih/structs","checksumSHA1":"zKLQNNEpgfrJOwVrDDyBMYEIx/w=","revision":"5ada2f449b108d87dbd8c1e60c32cdd065c27886","revisionTime":"2016-06-01T09:31:17Z"}, - {"path":"github.com/gobwas/glob","checksumSHA1":"/RE0VzeaXdaBUNeNMUXWqzly7qY=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/compiler","checksumSHA1":"UIxNdlqa2kcYi5EEe79/WzyPiz4=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/match","checksumSHA1":"jNdbCxBabqjv9tREXA4Nx45Y4wg=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/syntax","checksumSHA1":"2KhDNUE98XhHnIgg2S9E4gcWoig=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/syntax/ast","checksumSHA1":"FPIMdPSazNZGNU+ZmPtIeZpVHFU=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/syntax/lexer","checksumSHA1":"umyztSrQrQIl9JgwhDDb6wrSLeI=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/util/runes","checksumSHA1":"vux29fN5O22F3QPLOnjQ37278vg=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/gobwas/glob/util/strings","checksumSHA1":"Sxp2AHWkkLkIq6w9aMcLmzcwD6w=","revision":"19c076cdf202b3d1c0489bdfa2f2f289f634474b","revisionTime":"2018-02-08T21:18:42Z"}, - {"path":"github.com/hashicorp/consul/api","checksumSHA1":"GsJ84gKbQno8KbojhVTgSVWNues=","revision":"402636ff2db998edef392ac6d59210d2170b3ebf","revisionTime":"2017-04-05T04:22:14Z","version":"v0.8.0","versionExact":"v0.8.0"}, - {"path":"github.com/hashicorp/errwrap","checksumSHA1":"cdOCt0Yb+hdErz8NAQqayxPmRsY=","revision":"7554cd9344cec97297fa6649b055a8c98c2a1e55","revisionTime":"2014-10-27T22:47:10-07:00"}, - 
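For reference, typical usage of the singleflight package deleted above: Do collapses concurrent lookups for the same key into a single execution, and the shared flag reports when a result was handed to more than one caller:

    package main

    import (
        "fmt"
        "sync"
        "time"

        "golang.org/x/sync/singleflight"
    )

    func main() {
        var g singleflight.Group
        var wg sync.WaitGroup

        // Ten concurrent callers share one execution of the slow
        // function for the key "config"; duplicates wait in Do and
        // receive its result.
        for i := 0; i < 10; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                v, err, shared := g.Do("config", func() (interface{}, error) {
                    time.Sleep(10 * time.Millisecond) // simulate slow work
                    return "loaded", nil
                })
                fmt.Println(v, err, shared)
            }()
        }
        wg.Wait()
    }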
{"path":"github.com/hashicorp/go-cleanhttp","checksumSHA1":"Uzyon2091lmwacNsl1hCytjhHtg=","revision":"ad28ea4487f05916463e2423a55166280e8254b5","revisionTime":"2016-04-07T17:41:26Z"}, - {"path":"github.com/hashicorp/go-multierror","checksumSHA1":"lrSl49G23l6NhfilxPM0XFs5rZo=","revision":"d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5","revisionTime":"2015-09-16T13:57:42-07:00"}, - {"path":"github.com/hashicorp/go-retryablehttp","checksumSHA1":"GBDE1KDl/7c5hlRPYRZ7+C0WQ0g=","revision":"f4ed9b0fa01a2ac614afe7c897ed2e3d8208f3e8","revisionTime":"2016-08-10T17:22:55Z"}, - {"path":"github.com/hashicorp/go-rootcerts","checksumSHA1":"A1PcINvF3UiwHRKn8UcgARgvGRs=","revision":"6bb64b370b90e7ef1fa532be9e591a81c3493e00","revisionTime":"2016-05-03T09:34:40-05:00"}, - {"path":"github.com/hashicorp/hcl","checksumSHA1":"5LrCq/ydlbL6pq1cdmuxiw7QV98=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/hcl/ast","checksumSHA1":"WP5ODRo9+USuAsKYPu5RW3q8Epg=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/hcl/parser","checksumSHA1":"SVw0qm83Anf5T2GkYabn+f7Ibv0=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/hcl/scanner","checksumSHA1":"WZM0q7Sya8PcGj607x1npgcEPa4=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/hcl/strconv","checksumSHA1":"ETy6J7OZ7zjBNq0V0Lm8E/7Xafw=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/hcl/token","checksumSHA1":"VGNVyGYDGTTCTRqGH5R+CL0u6xw=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/json/parser","checksumSHA1":"uEaK2zagnGctsUmjWt8ZYmK1Yvs=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/json/scanner","checksumSHA1":"S1e0F9ZKSnqgOLfjDTYazRL28tA=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/hcl/json/token","checksumSHA1":"fNlXQCQEnb+B3k5UDL/r15xtSJY=","revision":"d7400db7143f8e869812e50a53acd6c8d92af3b8","revisionTime":"2016-06-07T00:19:40Z"}, - {"path":"github.com/hashicorp/serf/coordinate","checksumSHA1":"o8In5byYGDCY/mnTuV4Tfmci+3w=","revision":"0df3e3df1703f838243a7f3f12bf0b88566ade5a","revisionTime":"2015-12-21T18:49:50Z","version":"v0.6.4","versionExact":"v0.6.4"}, - {"path":"github.com/hashicorp/vault/api","checksumSHA1":"9/RHDTjqflfK3/f2Tfvwx+82I40=","revision":"f627c01df8d7bebb403cf899ca1beb24f5fc84cd","revisionTime":"2016-06-14T07:51:08Z","version":"v0.6.0","versionExact":"v0.6.0"}, - {"path":"github.com/magiconair/properties","checksumSHA1":"8ae1DyNE/yY9NvY3PmvtQdLBJnc=","revision":"49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934","revisionTime":"2017-10-31T21:05:36Z"}, - {"path":"github.com/mitchellh/go-homedir","checksumSHA1":"AXacfEchaUqT5RGmPmMXsOWRhv8=","revision":"1111e456ffea841564ac0fa5f69c26ef44dafec9","revisionTime":"2016-06-06T03:01:22Z"}, - {"path":"github.com/mitchellh/mapstructure","checksumSHA1":"KCJhN9Dx329wAN/SeL4CxeypPyk=","revision":"d2dd0262208475919e1a362f675cfc0e7c10e905","revisionTime":"2016-02-11T19:18:39-08:00"}, - 
{"path":"github.com/pascaldekloe/goe/verify","checksumSHA1":"4tr8yNUt5DB8GXc5y+uq6J7TJ54=","revision":"f99183613f483cd9b8c79359d572836e243e0763","revisionTime":"2015-07-19T21:56:08+02:00"}, - {"path":"github.com/pkg/profile","checksumSHA1":"C3yiSMdTQxSY3xqKJzMV9T+KnIc=","revision":"5b67d428864e92711fcbd2f8629456121a56d91f","revisionTime":"2017-05-09T09:25:25Z"}, - {"path":"github.com/pubnub/go-metrics-statsd","checksumSHA1":"xiXWXI7iNZ4Vkr/ayWU3KsGnRo0=","revision":"7da61f429d6bdaa79d5d5746998e0db9622c56fc","revisionTime":"2017-01-24T01:40:03Z"}, - {"path":"github.com/rcrowley/go-metrics","checksumSHA1":"an5RM8wjgPPloolUUYkvEncbHu4=","revision":"e2704e165165ec55d062f5919b4b29494e9fa790","revisionTime":"2018-05-03T17:46:38Z"}, - {"path":"github.com/rogpeppe/fastuuid","checksumSHA1":"ehRkDJisGCCSYdNgyvs1gSywSPE=","revision":"6724a57986aff9bff1a1770e9347036def7c89f6","revisionTime":"2015-01-06T09:31:45Z"}, - {"path":"github.com/sergi/go-diff/diffmatchpatch","checksumSHA1":"iWCtyR1TkJ22Bi/ygzfKDvOQdQY=","revision":"24e2351369ec4949b2ed0dc5c477afdd4c4034e8","revisionTime":"2017-01-18T13:12:30Z"}, - {"path":"golang.org/x/net/http2","checksumSHA1":"N1akwAdrHVfPPrsFOhG2ouP21VA=","revision":"f2499483f923065a842d38eb4c7f1927e6fc6e6d","revisionTime":"2017-01-14T04:22:49Z"}, - {"path":"golang.org/x/net/http2/hpack","checksumSHA1":"HzuGD7AwgC0p1az1WAQnEFnEk98=","revision":"f2499483f923065a842d38eb4c7f1927e6fc6e6d","revisionTime":"2017-01-14T04:22:49Z"}, - {"path":"golang.org/x/net/idna","checksumSHA1":"GIGmSrYACByf5JDIP9ByBZksY80=","revision":"f2499483f923065a842d38eb4c7f1927e6fc6e6d","revisionTime":"2017-01-14T04:22:49Z"}, - {"path":"golang.org/x/net/lex/httplex","checksumSHA1":"3xyuaSNmClqG4YWC7g0isQIbUTc=","revision":"f2499483f923065a842d38eb4c7f1927e6fc6e6d","revisionTime":"2017-01-14T04:22:49Z"}, - {"path":"golang.org/x/net/websocket","checksumSHA1":"xvuVB+aMh4udpinGsmtK31pcWC0=","revision":"9946ad7d5eae91d8edca4f54d1a1e130a052e823","revisionTime":"2015-10-20T22:50:55Z"}, - {"path":"golang.org/x/sync/singleflight","checksumSHA1":"VhUZFUuhLFSBFUfskMC4am5RIdc=","revision":"8e0aa688b654ef28caa72506fa5ec8dba9fc7690","revisionTime":"2017-07-19T03:38:01Z"} - ], - "rootPath": "github.com/fabiolb/fabio" -} From 58502cacddaa905be3e38b696f5ab99e3add8a2d Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 21:55:42 +0200 Subject: [PATCH 11/44] create vendor folder from go.mod --- go.mod | 22 +- go.sum | 52 + .../github.com/armon/go-proxyproto/.gitignore | 2 + vendor/github.com/armon/go-proxyproto/LICENSE | 21 + .../github.com/armon/go-proxyproto/README.md | 36 + .../armon/go-proxyproto/protocol.go | 194 ++ .../circonus-gometrics/.gitignore | 3 + .../circonus-labs/circonus-gometrics/LICENSE | 28 + .../circonus-gometrics/README.md | 181 ++ .../circonus-gometrics/api/api.go | 214 ++ .../circonus-gometrics/api/broker.go | 89 + .../circonus-gometrics/api/check.go | 113 + .../circonus-gometrics/api/checkbundle.go | 132 + .../circonus-gometrics/checkmgr/broker.go | 199 ++ .../circonus-gometrics/checkmgr/cert.go | 87 + .../circonus-gometrics/checkmgr/check.go | 211 ++ .../circonus-gometrics/checkmgr/checkmgr.go | 365 +++ .../circonus-gometrics/checkmgr/metrics.go | 79 + .../circonus-gometrics/circonus-gometrics.go | 254 ++ .../circonus-gometrics/counter.go | 55 + .../circonus-labs/circonus-gometrics/gauge.go | 81 + .../circonus-gometrics/histogram.go | 77 + .../circonus-gometrics/submit.go | 126 + .../circonus-labs/circonus-gometrics/text.go | 41 + .../circonus-labs/circonus-gometrics/tools.go | 23 
+ .../circonus-labs/circonus-gometrics/util.go | 100 + .../circonus-labs/circonusllhist/LICENSE | 28 + .../circonusllhist/circonusllhist.go | 555 ++++ .../cyberdelia/go-metrics-graphite/AUTHORS | 8 + .../cyberdelia/go-metrics-graphite/LICENSE | 22 + .../cyberdelia/go-metrics-graphite/README.md | 19 + .../go-metrics-graphite/graphite.go | 113 + vendor/github.com/fatih/structs/.gitignore | 23 + vendor/github.com/fatih/structs/.travis.yml | 11 + vendor/github.com/fatih/structs/LICENSE | 21 + vendor/github.com/fatih/structs/README.md | 163 + vendor/github.com/fatih/structs/field.go | 141 + vendor/github.com/fatih/structs/structs.go | 507 +++ vendor/github.com/fatih/structs/tags.go | 32 + vendor/github.com/gobwas/glob/.gitignore | 8 + vendor/github.com/gobwas/glob/.travis.yml | 11 + vendor/github.com/gobwas/glob/LICENSE | 21 + vendor/github.com/gobwas/glob/bench.sh | 26 + .../gobwas/glob/compiler/compiler.go | 525 ++++ vendor/github.com/gobwas/glob/glob.go | 80 + vendor/github.com/gobwas/glob/match/any.go | 45 + vendor/github.com/gobwas/glob/match/any_of.go | 82 + vendor/github.com/gobwas/glob/match/btree.go | 185 ++ .../github.com/gobwas/glob/match/contains.go | 58 + .../github.com/gobwas/glob/match/every_of.go | 99 + vendor/github.com/gobwas/glob/match/list.go | 49 + vendor/github.com/gobwas/glob/match/match.go | 81 + vendor/github.com/gobwas/glob/match/max.go | 49 + vendor/github.com/gobwas/glob/match/min.go | 57 + .../github.com/gobwas/glob/match/nothing.go | 27 + vendor/github.com/gobwas/glob/match/prefix.go | 50 + .../gobwas/glob/match/prefix_any.go | 55 + .../gobwas/glob/match/prefix_suffix.go | 62 + vendor/github.com/gobwas/glob/match/range.go | 48 + vendor/github.com/gobwas/glob/match/row.go | 77 + .../github.com/gobwas/glob/match/segments.go | 91 + vendor/github.com/gobwas/glob/match/single.go | 43 + vendor/github.com/gobwas/glob/match/suffix.go | 35 + .../gobwas/glob/match/suffix_any.go | 43 + vendor/github.com/gobwas/glob/match/super.go | 33 + vendor/github.com/gobwas/glob/match/text.go | 45 + vendor/github.com/gobwas/glob/readme.md | 148 + .../github.com/gobwas/glob/syntax/ast/ast.go | 122 + .../gobwas/glob/syntax/ast/parser.go | 157 + .../gobwas/glob/syntax/lexer/lexer.go | 273 ++ .../gobwas/glob/syntax/lexer/token.go | 88 + .../github.com/gobwas/glob/syntax/syntax.go | 14 + .../gobwas/glob/util/runes/runes.go | 154 + .../gobwas/glob/util/strings/strings.go | 39 + vendor/github.com/hashicorp/consul/LICENSE | 354 +++ .../github.com/hashicorp/consul/api/README.md | 43 + vendor/github.com/hashicorp/consul/api/acl.go | 140 + .../github.com/hashicorp/consul/api/agent.go | 471 +++ vendor/github.com/hashicorp/consul/api/api.go | 649 ++++ .../hashicorp/consul/api/catalog.go | 194 ++ .../hashicorp/consul/api/coordinate.go | 67 + .../github.com/hashicorp/consul/api/event.go | 104 + .../github.com/hashicorp/consul/api/health.go | 199 ++ vendor/github.com/hashicorp/consul/api/kv.go | 419 +++ .../github.com/hashicorp/consul/api/lock.go | 384 +++ .../hashicorp/consul/api/operator.go | 11 + .../hashicorp/consul/api/operator_area.go | 168 + .../consul/api/operator_autopilot.go | 215 ++ .../hashicorp/consul/api/operator_keyring.go | 83 + .../hashicorp/consul/api/operator_raft.go | 86 + .../hashicorp/consul/api/prepared_query.go | 198 ++ vendor/github.com/hashicorp/consul/api/raw.go | 24 + .../hashicorp/consul/api/semaphore.go | 512 +++ .../hashicorp/consul/api/session.go | 217 ++ .../hashicorp/consul/api/snapshot.go | 47 + .../github.com/hashicorp/consul/api/status.go | 43 + 
vendor/github.com/hashicorp/errwrap/LICENSE | 354 +++ vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 +++ .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 53 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-multierror/LICENSE | 353 +++ .../hashicorp/go-multierror/README.md | 91 + .../hashicorp/go-multierror/append.go | 37 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 23 + .../hashicorp/go-multierror/multierror.go | 51 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-retryablehttp/.gitignore | 3 + .../hashicorp/go-retryablehttp/.travis.yml | 12 + .../hashicorp/go-retryablehttp/LICENSE | 363 +++ .../hashicorp/go-retryablehttp/Makefile | 11 + .../hashicorp/go-retryablehttp/README.md | 43 + .../hashicorp/go-retryablehttp/client.go | 302 ++ .../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 +++ .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 43 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../hashicorp/go-rootcerts/rootcerts.go | 103 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + vendor/github.com/hashicorp/hcl/.gitignore | 9 + vendor/github.com/hashicorp/hcl/.travis.yml | 3 + vendor/github.com/hashicorp/hcl/LICENSE | 354 +++ vendor/github.com/hashicorp/hcl/Makefile | 17 + vendor/github.com/hashicorp/hcl/README.md | 104 + vendor/github.com/hashicorp/hcl/appveyor.yml | 16 + vendor/github.com/hashicorp/hcl/decoder.go | 654 ++++ vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 211 ++ .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 454 +++ .../hashicorp/hcl/hcl/scanner/scanner.go | 629 ++++ .../hashicorp/hcl/hcl/strconv/quote.go | 248 ++ .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 214 ++ .../hashicorp/hcl/json/parser/flatten.go | 111 + .../hashicorp/hcl/json/parser/parser.go | 297 ++ .../hashicorp/hcl/json/scanner/scanner.go | 451 +++ .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + vendor/github.com/hashicorp/serf/LICENSE | 354 +++ .../hashicorp/serf/coordinate/README.md | 1 + .../hashicorp/serf/coordinate/client.go | 180 ++ .../hashicorp/serf/coordinate/config.go | 70 + .../hashicorp/serf/coordinate/coordinate.go | 183 ++ .../hashicorp/serf/coordinate/phantom.go | 187 ++ .../hashicorp/serf/coordinate/test_util.go | 27 + vendor/github.com/hashicorp/vault/LICENSE | 363 +++ vendor/github.com/hashicorp/vault/api/SPEC.md | 611 ++++ vendor/github.com/hashicorp/vault/api/auth.go | 11 + .../hashicorp/vault/api/auth_token.go | 178 ++ .../github.com/hashicorp/vault/api/client.go | 333 ++ vendor/github.com/hashicorp/vault/api/help.go | 25 + .../github.com/hashicorp/vault/api/logical.go | 123 + .../github.com/hashicorp/vault/api/request.go | 71 + .../hashicorp/vault/api/response.go | 75 + .../github.com/hashicorp/vault/api/secret.go | 66 + vendor/github.com/hashicorp/vault/api/ssh.go | 38 + .../hashicorp/vault/api/ssh_agent.go | 239 ++ vendor/github.com/hashicorp/vault/api/sys.go | 11 + .../hashicorp/vault/api/sys_audit.go | 85 + 
.../hashicorp/vault/api/sys_auth.go | 56 + .../hashicorp/vault/api/sys_capabilities.go | 60 + .../hashicorp/vault/api/sys_generate_root.go | 77 + .../hashicorp/vault/api/sys_init.go | 51 + .../hashicorp/vault/api/sys_leader.go | 20 + .../hashicorp/vault/api/sys_lease.go | 45 + .../hashicorp/vault/api/sys_mounts.go | 113 + .../hashicorp/vault/api/sys_policy.go | 72 + .../hashicorp/vault/api/sys_rekey.go | 200 ++ .../hashicorp/vault/api/sys_rotate.go | 30 + .../hashicorp/vault/api/sys_seal.go | 56 + .../hashicorp/vault/api/sys_stepdown.go | 10 + .../magiconair/properties/.gitignore | 6 + .../magiconair/properties/.travis.yml | 9 + .../magiconair/properties/CHANGELOG.md | 104 + .../github.com/magiconair/properties/LICENSE | 25 + .../magiconair/properties/README.md | 100 + .../magiconair/properties/decode.go | 289 ++ .../github.com/magiconair/properties/doc.go | 156 + .../magiconair/properties/integrate.go | 34 + .../github.com/magiconair/properties/lex.go | 407 +++ .../github.com/magiconair/properties/load.go | 241 ++ .../magiconair/properties/parser.go | 95 + .../magiconair/properties/properties.go | 811 +++++ .../magiconair/properties/rangecheck.go | 31 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + .../mitchellh/go-homedir/homedir.go | 137 + .../mitchellh/mapstructure/.travis.yml | 7 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 151 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 767 +++++ .../pascaldekloe/goe/verify/values.go | 168 + .../pascaldekloe/goe/verify/verify.go | 73 + .../pubnub/go-metrics-statsd/LICENSE | 21 + .../pubnub/go-metrics-statsd/README.md | 18 + .../pubnub/go-metrics-statsd/statsd.go | 117 + .../github.com/rcrowley/go-metrics/.gitignore | 9 + .../rcrowley/go-metrics/.travis.yml | 17 + vendor/github.com/rcrowley/go-metrics/LICENSE | 29 + .../github.com/rcrowley/go-metrics/README.md | 168 + .../github.com/rcrowley/go-metrics/counter.go | 112 + .../github.com/rcrowley/go-metrics/debug.go | 76 + vendor/github.com/rcrowley/go-metrics/ewma.go | 138 + .../github.com/rcrowley/go-metrics/gauge.go | 120 + .../rcrowley/go-metrics/gauge_float64.go | 125 + .../rcrowley/go-metrics/graphite.go | 113 + .../rcrowley/go-metrics/healthcheck.go | 61 + .../rcrowley/go-metrics/histogram.go | 202 ++ vendor/github.com/rcrowley/go-metrics/json.go | 31 + vendor/github.com/rcrowley/go-metrics/log.go | 80 + .../github.com/rcrowley/go-metrics/memory.md | 285 ++ .../github.com/rcrowley/go-metrics/meter.go | 257 ++ .../github.com/rcrowley/go-metrics/metrics.go | 13 + .../rcrowley/go-metrics/opentsdb.go | 119 + .../rcrowley/go-metrics/registry.go | 363 +++ .../github.com/rcrowley/go-metrics/runtime.go | 212 ++ .../rcrowley/go-metrics/runtime_cgo.go | 10 + .../go-metrics/runtime_gccpufraction.go | 9 + .../rcrowley/go-metrics/runtime_no_cgo.go | 7 + .../go-metrics/runtime_no_gccpufraction.go | 9 + .../github.com/rcrowley/go-metrics/sample.go | 616 ++++ .../github.com/rcrowley/go-metrics/syslog.go | 78 + .../github.com/rcrowley/go-metrics/timer.go | 329 ++ .../rcrowley/go-metrics/validate.sh | 10 + .../github.com/rcrowley/go-metrics/writer.go | 100 + vendor/github.com/rogpeppe/fastuuid/LICENSE | 26 + vendor/github.com/rogpeppe/fastuuid/README.md | 50 + vendor/github.com/rogpeppe/fastuuid/uuid.go | 66 + vendor/github.com/sergi/go-diff/AUTHORS | 25 + vendor/github.com/sergi/go-diff/CONTRIBUTORS | 32 + 
vendor/github.com/sergi/go-diff/LICENSE | 20 + .../sergi/go-diff/diffmatchpatch/diff.go | 1339 ++++++++ .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 + .../sergi/go-diff/diffmatchpatch/match.go | 160 + .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 + .../sergi/go-diff/diffmatchpatch/patch.go | 556 ++++ .../go-diff/diffmatchpatch/stringutil.go | 88 + vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/Dockerfile | 51 + vendor/golang.org/x/net/http2/Makefile | 3 + vendor/golang.org/x/net/http2/README | 20 + .../x/net/http2/client_conn_pool.go | 256 ++ .../x/net/http2/configure_transport.go | 80 + vendor/golang.org/x/net/http2/errors.go | 130 + vendor/golang.org/x/net/http2/fixed_buffer.go | 60 + vendor/golang.org/x/net/http2/flow.go | 50 + vendor/golang.org/x/net/http2/frame.go | 1544 +++++++++ vendor/golang.org/x/net/http2/go16.go | 43 + vendor/golang.org/x/net/http2/go17.go | 106 + vendor/golang.org/x/net/http2/go17_not18.go | 36 + vendor/golang.org/x/net/http2/go18.go | 50 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/headermap.go | 78 + vendor/golang.org/x/net/http2/hpack/encode.go | 251 ++ vendor/golang.org/x/net/http2/hpack/hpack.go | 542 ++++ .../golang.org/x/net/http2/hpack/huffman.go | 212 ++ vendor/golang.org/x/net/http2/hpack/tables.go | 352 +++ vendor/golang.org/x/net/http2/http2.go | 387 +++ vendor/golang.org/x/net/http2/not_go16.go | 46 + vendor/golang.org/x/net/http2/not_go17.go | 87 + vendor/golang.org/x/net/http2/not_go18.go | 27 + vendor/golang.org/x/net/http2/pipe.go | 153 + vendor/golang.org/x/net/http2/server.go | 2753 +++++++++++++++++ vendor/golang.org/x/net/http2/transport.go | 2129 +++++++++++++ vendor/golang.org/x/net/http2/write.go | 370 +++ vendor/golang.org/x/net/http2/writesched.go | 242 ++ .../x/net/http2/writesched_priority.go | 452 +++ .../x/net/http2/writesched_random.go | 72 + vendor/golang.org/x/net/idna/idna.go | 68 + vendor/golang.org/x/net/idna/punycode.go | 200 ++ .../golang.org/x/net/lex/httplex/httplex.go | 351 +++ vendor/golang.org/x/net/websocket/client.go | 106 + vendor/golang.org/x/net/websocket/dial.go | 24 + vendor/golang.org/x/net/websocket/hybi.go | 583 ++++ vendor/golang.org/x/net/websocket/server.go | 113 + .../golang.org/x/net/websocket/websocket.go | 448 +++ vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + .../x/sync/singleflight/singleflight.go | 111 + vendor/modules.txt | 73 + 297 files changed, 46202 insertions(+), 11 deletions(-) create mode 100644 go.sum create mode 100644 vendor/github.com/armon/go-proxyproto/.gitignore create mode 100644 vendor/github.com/armon/go-proxyproto/LICENSE create mode 100644 vendor/github.com/armon/go-proxyproto/README.md create mode 100644 vendor/github.com/armon/go-proxyproto/protocol.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/.gitignore create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/LICENSE create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/README.md create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/api.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/check.go 
create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/counter.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/gauge.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/histogram.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/submit.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/text.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/tools.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/util.go create mode 100644 vendor/github.com/circonus-labs/circonusllhist/LICENSE create mode 100644 vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/README.md create mode 100644 vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go create mode 100644 vendor/github.com/fatih/structs/.gitignore create mode 100644 vendor/github.com/fatih/structs/.travis.yml create mode 100644 vendor/github.com/fatih/structs/LICENSE create mode 100644 vendor/github.com/fatih/structs/README.md create mode 100644 vendor/github.com/fatih/structs/field.go create mode 100644 vendor/github.com/fatih/structs/structs.go create mode 100644 vendor/github.com/fatih/structs/tags.go create mode 100644 vendor/github.com/gobwas/glob/.gitignore create mode 100644 vendor/github.com/gobwas/glob/.travis.yml create mode 100644 vendor/github.com/gobwas/glob/LICENSE create mode 100644 vendor/github.com/gobwas/glob/bench.sh create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go create mode 100644 vendor/github.com/gobwas/glob/glob.go create mode 100644 vendor/github.com/gobwas/glob/match/any.go create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go create mode 100644 vendor/github.com/gobwas/glob/match/btree.go create mode 100644 vendor/github.com/gobwas/glob/match/contains.go create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go create mode 100644 vendor/github.com/gobwas/glob/match/list.go create mode 100644 vendor/github.com/gobwas/glob/match/match.go create mode 100644 vendor/github.com/gobwas/glob/match/max.go create mode 100644 vendor/github.com/gobwas/glob/match/min.go create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/range.go create mode 100644 vendor/github.com/gobwas/glob/match/row.go create mode 100644 vendor/github.com/gobwas/glob/match/segments.go create mode 100644 vendor/github.com/gobwas/glob/match/single.go create 
mode 100644 vendor/github.com/gobwas/glob/match/suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/super.go create mode 100644 vendor/github.com/gobwas/glob/match/text.go create mode 100644 vendor/github.com/gobwas/glob/readme.md create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go create mode 100644 vendor/github.com/hashicorp/consul/LICENSE create mode 100644 vendor/github.com/hashicorp/consul/api/README.md create mode 100644 vendor/github.com/hashicorp/consul/api/acl.go create mode 100644 vendor/github.com/hashicorp/consul/api/agent.go create mode 100644 vendor/github.com/hashicorp/consul/api/api.go create mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go create mode 100644 vendor/github.com/hashicorp/consul/api/event.go create mode 100644 vendor/github.com/hashicorp/consul/api/health.go create mode 100644 vendor/github.com/hashicorp/consul/api/kv.go create mode 100644 vendor/github.com/hashicorp/consul/api/lock.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query.go create mode 100644 vendor/github.com/hashicorp/consul/api/raw.go create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go create mode 100644 vendor/github.com/hashicorp/consul/api/session.go create mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go create mode 100644 vendor/github.com/hashicorp/consul/api/status.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE create mode 100644 
vendor/github.com/hashicorp/go-retryablehttp/Makefile create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/Makefile create mode 100644 vendor/github.com/hashicorp/hcl/README.md create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/lex.go create mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/hashicorp/serf/LICENSE create mode 100644 vendor/github.com/hashicorp/serf/coordinate/README.md create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/test_util.go create mode 100644 vendor/github.com/hashicorp/vault/LICENSE create mode 100644 vendor/github.com/hashicorp/vault/api/SPEC.md create mode 100644 vendor/github.com/hashicorp/vault/api/auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/auth_token.go create mode 100644 vendor/github.com/hashicorp/vault/api/client.go create mode 100644 vendor/github.com/hashicorp/vault/api/help.go create mode 100644 vendor/github.com/hashicorp/vault/api/logical.go create mode 100644 vendor/github.com/hashicorp/vault/api/request.go create mode 100644 vendor/github.com/hashicorp/vault/api/response.go create mode 100644 vendor/github.com/hashicorp/vault/api/secret.go create mode 100644 vendor/github.com/hashicorp/vault/api/ssh.go 
create mode 100644 vendor/github.com/hashicorp/vault/api/ssh_agent.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_audit.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_capabilities.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_generate_root.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_init.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leader.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_lease.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mounts.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_policy.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rekey.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rotate.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_seal.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_stepdown.go create mode 100644 vendor/github.com/magiconair/properties/.gitignore create mode 100644 vendor/github.com/magiconair/properties/.travis.yml create mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md create mode 100644 vendor/github.com/magiconair/properties/LICENSE create mode 100644 vendor/github.com/magiconair/properties/README.md create mode 100644 vendor/github.com/magiconair/properties/decode.go create mode 100644 vendor/github.com/magiconair/properties/doc.go create mode 100644 vendor/github.com/magiconair/properties/integrate.go create mode 100644 vendor/github.com/magiconair/properties/lex.go create mode 100644 vendor/github.com/magiconair/properties/load.go create mode 100644 vendor/github.com/magiconair/properties/parser.go create mode 100644 vendor/github.com/magiconair/properties/properties.go create mode 100644 vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/pascaldekloe/goe/verify/values.go create mode 100644 vendor/github.com/pascaldekloe/goe/verify/verify.go create mode 100644 vendor/github.com/pubnub/go-metrics-statsd/LICENSE create mode 100644 vendor/github.com/pubnub/go-metrics-statsd/README.md create mode 100644 vendor/github.com/pubnub/go-metrics-statsd/statsd.go create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 
vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/validate.sh create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/rogpeppe/fastuuid/LICENSE create mode 100644 vendor/github.com/rogpeppe/fastuuid/README.md create mode 100644 vendor/github.com/rogpeppe/fastuuid/uuid.go create mode 100644 vendor/github.com/sergi/go-diff/AUTHORS create mode 100644 vendor/github.com/sergi/go-diff/CONTRIBUTORS create mode 100644 vendor/github.com/sergi/go-diff/LICENSE create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/fixed_buffer.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/go16.go create mode 100644 vendor/golang.org/x/net/http2/go17.go create mode 100644 vendor/golang.org/x/net/http2/go17_not18.go create mode 100644 vendor/golang.org/x/net/http2/go18.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 
vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/golang.org/x/net/http2/not_go17.go create mode 100644 vendor/golang.org/x/net/http2/not_go18.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/idna/idna.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/dial.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go create mode 100644 vendor/modules.txt diff --git a/go.mod b/go.mod index 4ff408fb7..ed518370a 100644 --- a/go.mod +++ b/go.mod @@ -3,22 +3,22 @@ module github.com/fabiolb/fabio require ( github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 - github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 + github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 // indirect github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 - github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10 + github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10 // indirect github.com/gobwas/glob v0.0.0-20180208211842-19c076cdf202 github.com/hashicorp/consul v0.8.0 - github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce - github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0 - github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 - github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a - github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 - github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f - github.com/hashicorp/serf v0.7.0 + github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce // indirect + github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0 // indirect + github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 // indirect + github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f // indirect + github.com/hashicorp/serf v0.7.0 // indirect github.com/hashicorp/vault v0.6.0 github.com/magiconair/properties v0.0.0-20171031211101-49d762b9817b - 
github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea - github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084 + github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea // indirect + github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084 // indirect github.com/pascaldekloe/goe v0.0.0-20150719195608-f99183613f48 github.com/pkg/profile v1.2.1 github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..74da97231 --- /dev/null +++ b/go.sum @@ -0,0 +1,52 @@ +github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 h1:AuTQOaYRBRtfDwY9ksr2sLXBgNvI5Mi6nhPdKdLE8Uw= +github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= +github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 h1:DHdPk46/oiLoYPQ7Dt10pRx74HaJVoyjS5Ea7eyROes= +github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 h1:g5Ns3zGpLJ0T+1YJ0d3VVPrlMMyFHXV4x82xNgIT9z0= +github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 h1:TYSgSeiA3LfBi6son0KgHb4wcTXV1kB5wZtTorXQ914= +github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= +github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10 h1:ajF8LGnOEaXJZD9XGNIawJ55pmntNqLiKdEFXo9/Bnk= +github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/gobwas/glob v0.0.0-20180208211842-19c076cdf202 h1:XcHVP1KCiY3CQI5oi8+7pRPD/5dC2NbhGZLn5CFGO74= +github.com/gobwas/glob v0.0.0-20180208211842-19c076cdf202/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/hashicorp/consul v0.8.0 h1:Z/JG7b8Djn1v4LwLT9YWSTrxMQwTidzT1sD6rJYF9Pc= +github.com/hashicorp/consul v0.8.0/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0 h1:2l0haPDqCzZEO160UR5DSrrl8RWptFCoxFsSbRLJBaI= +github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 h1:gb61U/o4ZJ6TRYvZqJUKYidIhJOEAvNyVMesryROxAY= +github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a h1:EyMMojcpa0OFWj8zjSPQ+8SsBVm+hMDqQlxjGdCxUDQ= +github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:9HVkPxOpo+yO93Ah4yrO67d/qh0fbLLWbKqhYjyHq9A= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f h1:4/YA+0VGQiTOscm2IpwDVLncidInJKGuU62GBKeUbE8= 
+github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/serf v0.7.0 h1:9NBx0ZSoEtMrWrNf2ByXsmoKcpJyHIU7xGU/itMCtkg= +github.com/hashicorp/serf v0.7.0/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/vault v0.6.0 h1:wc/KN2SZ76imak5yOF4Utm6p0e4BNtSKLhWlYrzX+vQ= +github.com/hashicorp/vault v0.6.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= +github.com/magiconair/properties v0.0.0-20171031211101-49d762b9817b h1:bR3tkU6ocnK5a0NsdgTMWc7sILt+BY0PceUYC6EpSqc= +github.com/magiconair/properties v0.0.0-20171031211101-49d762b9817b/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea h1:6vbWobCwqTqcqPXwGOyVXdwdINhgsh7rEueNhT/89kM= +github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084 h1:Z2EJ6SUYrzx6J/YH/ltZthkUQTB/5+Ykpkew6y9DGJE= +github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pascaldekloe/goe v0.0.0-20150719195608-f99183613f48 h1:8y3keSPOPy5pszxfE4wtwlAKuuIde8X6zcto98U4RYw= +github.com/pascaldekloe/goe v0.0.0-20150719195608-f99183613f48/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b h1:xCVl8I01z+W/Q+z7napcXRrOhsFzxmYLiBxigDbNQcg= +github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b/go.mod h1:5UoZ1X6PWZWpPxwpR8qZ/qTN2BXIrrYTV9j+6TaQngA= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sergi/go-diff v0.0.0-20170118131230-24e2351369ec h1:shdBt9xKJPFOcYfVt160PFXcIKxJZC6165xf9qeWx/o= +github.com/sergi/go-diff v0.0.0-20170118131230-24e2351369ec/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +golang.org/x/net v0.0.0-20170114055629-f2499483f923 h1:cLl2p2++IYXqH4YzQ7uIc7QLOa4SU0x1vSy9Da++V94= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654 h1:ogYMTsPtkSw1JROKERM/ualwPUvE2UOP2KfhwFO6aVE= +golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/armon/go-proxyproto/.gitignore b/vendor/github.com/armon/go-proxyproto/.gitignore new file mode 100644 index 000000000..dd2440d55 --- /dev/null +++ b/vendor/github.com/armon/go-proxyproto/.gitignore @@ -0,0 +1,2 @@ +*.test +*~ diff --git a/vendor/github.com/armon/go-proxyproto/LICENSE b/vendor/github.com/armon/go-proxyproto/LICENSE new file mode 100644 index 000000000..3ed5f4302 --- /dev/null +++ b/vendor/github.com/armon/go-proxyproto/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any 
person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-proxyproto/README.md b/vendor/github.com/armon/go-proxyproto/README.md
new file mode 100644
index 000000000..25a779cca
--- /dev/null
+++ b/vendor/github.com/armon/go-proxyproto/README.md
@@ -0,0 +1,36 @@
+# proxyproto
+
+This library provides the `proxyproto` package, which can be used for servers
+listening behind HAProxy or Amazon ELB load balancers. Those load balancers
+support the proxy protocol (http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt),
+which provides a simple mechanism for the server to get the address of the client
+instead of the load balancer.
+
+This library provides both a net.Listener and net.Conn implementation that
+can be used to handle situations in which you may be using the proxy protocol.
+Only proxy protocol version 1, the human-readable form, is understood; a
+version 1 header is a single line such as `PROXY TCP4 192.168.0.1 10.0.0.1 56324 443\r\n`
+sent before the client's data.
+
+The only caveat is that we check for the "PROXY " prefix to determine if the protocol
+is being used. If that string can occur as part of your input, it is ambiguous
+whether the protocol is being used and you may have problems.
+
+# Documentation
+
+Full documentation can be found [here](http://godoc.org/github.com/armon/go-proxyproto).
+
+# Examples
+
+Using the library is very simple:
+
+```
+
+// Create a listener
+list, err := net.Listen("tcp", "...")
+
+// Wrap the listener in a proxyproto listener
+proxyList := &proxyproto.Listener{Listener: list}
+conn, err := proxyList.Accept()
+
+...
+```
+
diff --git a/vendor/github.com/armon/go-proxyproto/protocol.go b/vendor/github.com/armon/go-proxyproto/protocol.go
new file mode 100644
index 000000000..2fc1dfc01
--- /dev/null
+++ b/vendor/github.com/armon/go-proxyproto/protocol.go
@@ -0,0 +1,194 @@
+package proxyproto
+
+import (
+    "bufio"
+    "bytes"
+    "fmt"
+    "io"
+    "log"
+    "net"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+)
+
+var (
+    // prefix is the string we look for at the start of a connection
+    // to check if this connection is using the proxy protocol
+    prefix    = []byte("PROXY ")
+    prefixLen = len(prefix)
+)
+
+// Listener is used to wrap an underlying listener,
+// whose connections may be using the HAProxy Proxy Protocol (version 1).
+// If the connection is using the protocol, the RemoteAddr() will return
+// the correct client address.
+type Listener struct {
+    Listener net.Listener
+}
+
+// Conn is used to wrap an underlying connection which
+// may be speaking the Proxy Protocol. If it is, the RemoteAddr() will
+// return the address of the client instead of the proxy address.
+type Conn struct {
+    bufReader *bufio.Reader
+    conn      net.Conn
+    dstAddr   *net.TCPAddr
+    srcAddr   *net.TCPAddr
+    once      sync.Once
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (p *Listener) Accept() (net.Conn, error) {
+    // Get the underlying connection
+    conn, err := p.Listener.Accept()
+    if err != nil {
+        return nil, err
+    }
+    return NewConn(conn), nil
+}
+
+// Close closes the underlying listener.
+func (p *Listener) Close() error {
+    return p.Listener.Close()
+}
+
+// Addr returns the underlying listener's network address.
+func (p *Listener) Addr() net.Addr {
+    return p.Listener.Addr()
+}
+
+// NewConn is used to wrap a net.Conn that may be speaking
+// the proxy protocol into a proxyproto.Conn
+func NewConn(conn net.Conn) *Conn {
+    pConn := &Conn{
+        bufReader: bufio.NewReader(conn),
+        conn:      conn,
+    }
+    return pConn
+}
+
+// Read checks for the proxy protocol header when doing
+// the initial scan. If there is an error parsing the header,
+// it is returned and the socket is closed.
+func (p *Conn) Read(b []byte) (int, error) {
+    var err error
+    p.once.Do(func() { err = p.checkPrefix() })
+    if err != nil {
+        return 0, err
+    }
+    return p.bufReader.Read(b)
+}
+
+func (p *Conn) Write(b []byte) (int, error) {
+    return p.conn.Write(b)
+}
+
+func (p *Conn) Close() error {
+    return p.conn.Close()
+}
+
+func (p *Conn) LocalAddr() net.Addr {
+    return p.conn.LocalAddr()
+}
+
+// RemoteAddr returns the address of the client if the proxy
+// protocol is being used, otherwise it just returns the address of
+// the socket peer. If there is an error parsing the header, the
+// address of the client is not returned, and the socket is closed.
+// One implication of this is that the call could block if the
+// client is slow. Using a Deadline is recommended if this is called
+// before Read().
+func (p *Conn) RemoteAddr() net.Addr {
+    p.once.Do(func() {
+        if err := p.checkPrefix(); err != nil && err != io.EOF {
+            log.Printf("[ERR] Failed to read proxy prefix: %v", err)
+        }
+    })
+    if p.srcAddr != nil {
+        return p.srcAddr
+    }
+    return p.conn.RemoteAddr()
+}
+
+func (p *Conn) SetDeadline(t time.Time) error {
+    return p.conn.SetDeadline(t)
+}
+
+func (p *Conn) SetReadDeadline(t time.Time) error {
+    return p.conn.SetReadDeadline(t)
+}
+
+func (p *Conn) SetWriteDeadline(t time.Time) error {
+    return p.conn.SetWriteDeadline(t)
+}
+
+func (p *Conn) checkPrefix() error {
+    // Incrementally check each byte of the prefix
+    for i := 1; i <= prefixLen; i++ {
+        inp, err := p.bufReader.Peek(i)
+        if err != nil {
+            return err
+        }
+
+        // Check for a prefix mismatch, quit early
+        if !bytes.Equal(inp, prefix[:i]) {
+            return nil
+        }
+    }
+
+    // Read the header line
+    header, err := p.bufReader.ReadString('\n')
+    if err != nil {
+        p.conn.Close()
+        return err
+    }
+
+    // Strip the carriage return and new line
+    header = header[:len(header)-2]
+
+    // Split on spaces, should be (PROXY <type> <src addr> <dst addr> <src port> <dst port>)
+    parts := strings.Split(header, " ")
+    if len(parts) != 6 {
+        p.conn.Close()
+        return fmt.Errorf("Invalid header line: %s", header)
+    }
+
+    // Verify the type is known
+    switch parts[1] {
+    case "TCP4":
+    case "TCP6":
+    default:
+        p.conn.Close()
+        return fmt.Errorf("Unhandled address type: %s", parts[1])
+    }
+
+    // Parse out the source address
+    ip := net.ParseIP(parts[2])
+    if ip == nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid source ip: %s", parts[2])
+    }
+    port, err := strconv.Atoi(parts[4])
+    if err != nil {
+        p.conn.Close()
+        return fmt.Errorf("Invalid source port: %s", parts[4])
+    }
+    p.srcAddr =
&net.TCPAddr{IP: ip, Port: port} + + // Parse out the destination address + ip = net.ParseIP(parts[3]) + if ip == nil { + p.conn.Close() + return fmt.Errorf("Invalid destination ip: %s", parts[3]) + } + port, err = strconv.Atoi(parts[5]) + if err != nil { + p.conn.Close() + return fmt.Errorf("Invalid destination port: %s", parts[5]) + } + p.dstAddr = &net.TCPAddr{IP: ip, Port: port} + + return nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore b/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore new file mode 100644 index 000000000..713e0e0aa --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +env.sh +NOTES.md diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE b/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE new file mode 100644 index 000000000..761798c3b --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2016, Circonus, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name Circonus, Inc. nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md new file mode 100644 index 000000000..d245e40af --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/README.md @@ -0,0 +1,181 @@ +# Circonus metrics tracking for Go applications + +This library supports named counters, gauges and histograms. +It also provides convenience wrappers for registering latency +instrumented functions with Go's builtin http server. + +Initializing only requires setting an ApiToken. 
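+
+Before the full configuration walk-through in the example below, here is a
+minimal sketch of the smallest viable setup. It assumes only that the
+`CIRCONUS_API_TOKEN` environment variable is set; every other option falls
+back to the defaults described in the example that follows.
+
+```go
+package main
+
+import (
+    "os"
+
+    cgm "github.com/circonus-labs/circonus-gometrics"
+)
+
+func main() {
+    cmc := &cgm.Config{}
+    // The API token is the only required setting; all other
+    // options use the library defaults.
+    cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
+
+    metrics, err := cgm.NewCirconusMetrics(cmc)
+    if err != nil {
+        panic(err)
+    }
+    metrics.Start() // start the internal auto-flush timer
+
+    metrics.Increment("requests") // a named counter
+    metrics.Flush()               // submit anything still outstanding
+}
+```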
+
+## Example
+
+**rough and simple**
+
+```go
+package main
+
+import (
+    "log"
+    "math/rand"
+    "os"
+    "time"
+
+    cgm "github.com/circonus-labs/circonus-gometrics"
+)
+
+func main() {
+
+    log.Println("Configuring cgm")
+
+    cmc := &cgm.Config{}
+
+    // Interval at which metrics are submitted to Circonus, default: 10 seconds
+    cmc.Interval = "10s" // 10 seconds
+    // Enable debug messages, default: false
+    cmc.Debug = false
+    // Send debug messages to a specific log.Logger instance
+    // default: stderr if debug, else discard
+    //cmc.CheckManager.Log = ...
+
+    // Circonus API configuration options
+    //
+    // Token, no default (blank disables check manager)
+    cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
+    // App name, default: circonus-gometrics
+    cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP")
+    // URL, default: https://api.circonus.com/v2
+    cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL")
+
+    // Check configuration options
+    //
+    // precedence 1 - explicit submission_url
+    // precedence 2 - specific check id (note: not a check bundle id)
+    // precedence 3 - search using instanceId and searchTag
+    // otherwise: if an applicable check is NOT specified or found, an
+    //            attempt will be made to automatically create one
+    //
+    // Pre-existing httptrap check submission_url
+    cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL")
+    // Pre-existing httptrap check id (check, not check bundle)
+    cmc.CheckManager.Check.ID = ""
+    // if neither a submission url nor check id are provided, an attempt will be made to find an existing
+    // httptrap check by using the circonus api to search for a check matching the following criteria:
+    //      an active check,
+    //      of type httptrap,
+    //      where the target/host is equal to InstanceId - see below
+    //      and the check has a tag equal to SearchTag - see below
+    // Instance ID - an identifier for the 'group of metrics emitted by this process or service'
+    //               this is used as the value for check.target (aka host)
+    //               default: 'hostname':'program name'
+    // note: for an ephemeral or transient instance where metric continuity is
+    //       desired, set this explicitly so that the current hostname will not be used.
+    cmc.CheckManager.Check.InstanceID = ""
+    // Search tag - a specific tag which, when coupled with the instanceId, serves to identify the
+    // origin and/or grouping of the metrics
+    // default: service:application name (e.g. service:consul)
+    cmc.CheckManager.Check.SearchTag = ""
+    // Check secret, default: generated when a check needs to be created
+    cmc.CheckManager.Check.Secret = ""
+    // Check tags, array of strings, additional tags to add to a new check, default: none
+    //cmc.CheckManager.Check.Tags = []string{"category:tagname"}
+    // max amount of time to hold on to a submission url
+    // when a given submission fails (due to retries); if the
+    // time the url was last updated is greater than this, the trap
+    // url will be refreshed (e.g. if the broker is changed
+    // in the UI), default: 5 minutes
+    cmc.CheckManager.Check.MaxURLAge = "5m"
+    // custom display name for check, default: "InstanceId /cgm"
+    cmc.CheckManager.Check.DisplayName = ""
+    // force metric activation - if a metric has been disabled via the UI
+    // the default behavior is to *not* re-activate the metric; this setting
+    // overrides the behavior and will re-activate the metric when it is
+    // encountered. "(true|false)", default "false"
+    cmc.CheckManager.Check.ForceMetricActivation = "false"
+
+    // Broker configuration options
+    //
+    // Broker ID of specific broker to use, default: random enterprise broker or
+    // Circonus default if no enterprise brokers are available.
+    // default: only used if set
+    cmc.CheckManager.Broker.ID = ""
+    // used to select a broker with the same tag (e.g. can be used to dictate that a broker
+    // serving a specific location should be used. "dc:sfo", "location:new_york", "zone:us-west")
+    // if more than one broker has the tag, one will be selected randomly from the resulting list
+    // default: not used unless != ""
+    cmc.CheckManager.Broker.SelectTag = ""
+    // longest time to wait for a broker connection (if latency is greater than this, the broker
+    // will be considered invalid and not available for selection), default: 500 milliseconds
+    cmc.CheckManager.Broker.MaxResponseTime = "500ms"
+    // if broker Id or SelectTag are not specified, a broker will be selected randomly
+    // from the list of brokers available to the api token. enterprise brokers take precedence.
+    // viable brokers are "active", have the "httptrap" module enabled, are reachable and respond
+    // within MaxResponseTime.
+
+    log.Println("Creating new cgm instance")
+
+    metrics, err := cgm.NewCirconusMetrics(cmc)
+    if err != nil {
+        panic(err)
+    }
+
+    src := rand.NewSource(time.Now().UnixNano())
+    rnd := rand.New(src)
+
+    log.Println("Starting cgm internal auto-flush timer")
+    metrics.Start()
+
+    log.Println("Starting to send metrics")
+
+    // number of "sets" of metrics to send (a minute worth)
+    max := 60
+
+    for i := 1; i < max; i++ {
+        log.Printf("\tmetric set %d of %d", i, max)
+        metrics.Timing("ding", rnd.Float64()*10)
+        metrics.Increment("dong")
+        metrics.Gauge("dang", 10)
+        time.Sleep(1000 * time.Millisecond)
+    }
+
+    log.Println("Flushing any outstanding metrics manually")
+    metrics.Flush()
+
+}
+```
+
+### HTTP Handler wrapping
+
+```go
+http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func))
+```
+
+### HTTP latency example
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+    "os"
+
+    cgm "github.com/circonus-labs/circonus-gometrics"
+)
+
+func main() {
+    cmc := &cgm.Config{}
+    cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
+
+    metrics, err := cgm.NewCirconusMetrics(cmc)
+    if err != nil {
+        panic(err)
+    }
+    metrics.Start()
+
+    http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) {
+        fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:])
+    }))
+    http.ListenAndServe(":8080", http.DefaultServeMux)
+}
+```
+
+Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
new file mode 100644
index 000000000..7a7f18708
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
@@ -0,0 +1,214 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Package api provides methods for interacting with the Circonus API +package api + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/hashicorp/go-retryablehttp" +) + +const ( + // a few sensible defaults + defaultAPIURL = "https://api.circonus.com/v2" + defaultAPIApp = "circonus-gometrics" + minRetryWait = 10 * time.Millisecond + maxRetryWait = 50 * time.Millisecond + maxRetries = 3 +) + +// TokenKeyType - Circonus API Token key +type TokenKeyType string + +// TokenAppType - Circonus API Token app name +type TokenAppType string + +// IDType Circonus object id (numeric portion of cid) +type IDType int + +// CIDType Circonus object cid +type CIDType string + +// URLType submission url type +type URLType string + +// SearchQueryType search query +type SearchQueryType string + +// SearchTagType search/select tag type +type SearchTagType string + +// Config options for Circonus API +type Config struct { + URL string + TokenKey string + TokenApp string + Log *log.Logger + Debug bool +} + +// API Circonus API +type API struct { + apiURL *url.URL + key TokenKeyType + app TokenAppType + Debug bool + Log *log.Logger +} + +// NewAPI returns a new Circonus API +func NewAPI(ac *Config) (*API, error) { + + if ac == nil { + return nil, errors.New("Invalid API configuration (nil)") + } + + key := TokenKeyType(ac.TokenKey) + if key == "" { + return nil, errors.New("API Token is required") + } + + app := TokenAppType(ac.TokenApp) + if app == "" { + app = defaultAPIApp + } + + au := string(ac.URL) + if au == "" { + au = defaultAPIURL + } + if !strings.Contains(au, "/") { + // if just a hostname is passed, ASSume "https" and a path prefix of "/v2" + au = fmt.Sprintf("https://%s/v2", ac.URL) + } + if last := len(au) - 1; last >= 0 && au[last] == '/' { + au = au[:last] + } + apiURL, err := url.Parse(au) + if err != nil { + return nil, err + } + + a := &API{apiURL, key, app, ac.Debug, ac.Log} + + if a.Log == nil { + if a.Debug { + a.Log = log.New(os.Stderr, "", log.LstdFlags) + } else { + a.Log = log.New(ioutil.Discard, "", log.LstdFlags) + } + } + + return a, nil +} + +// Get API request +func (a *API) Get(reqPath string) ([]byte, error) { + return a.apiCall("GET", reqPath, nil) +} + +// Delete API request +func (a *API) Delete(reqPath string) ([]byte, error) { + return a.apiCall("DELETE", reqPath, nil) +} + +// Post API request +func (a *API) Post(reqPath string, data []byte) ([]byte, error) { + return a.apiCall("POST", reqPath, data) +} + +// Put API request +func (a *API) Put(reqPath string, data []byte) ([]byte, error) { + return a.apiCall("PUT", reqPath, data) +} + +// apiCall call Circonus API +func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) { + dataReader := bytes.NewReader(data) + reqURL := a.apiURL.String() + + if reqPath[:1] != "/" { + reqURL += "/" + } + if reqPath[:3] == "/v2" { + reqURL += reqPath[3:len(reqPath)] + } else { + reqURL += reqPath + } + + req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) + if err != nil { + return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) + } + req.Header.Add("Accept", "application/json") + req.Header.Add("X-Circonus-Auth-Token", string(a.key)) + req.Header.Add("X-Circonus-App-Name", string(a.app)) + + // keep last HTTP error in the event of retry failure + var lastHTTPError error + retryPolicy := func(resp *http.Response, err error) (bool, error) { + if err != nil { + lastHTTPError = err + return true, 
err + } + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || resp.StatusCode >= 500 { + defer resp.Body.Close() + body, readErr := ioutil.ReadAll(resp.Body) + if readErr != nil { + lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr) + } else { + lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body)) + } + return true, nil + } + return false, nil + } + + client := retryablehttp.NewClient() + client.RetryWaitMin = minRetryWait + client.RetryWaitMax = maxRetryWait + client.RetryMax = maxRetries + client.Logger = a.Log + client.CheckRetry = retryPolicy + + resp, err := client.Do(req) + if err != nil { + if lastHTTPError != nil { + return nil, fmt.Errorf("[ERROR] fetching: %+v %+v", err, lastHTTPError) + } + return nil, fmt.Errorf("[ERROR] fetching %s: %+v", reqURL, err) + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("[ERROR] reading body %+v", err) + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + msg := fmt.Sprintf("API response code %d: %s", resp.StatusCode, string(body)) + if a.Debug { + a.Log.Printf("[DEBUG] %s\n", msg) + } + + return nil, fmt.Errorf("[ERROR] %s", msg) + } + + return body, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go new file mode 100644 index 000000000..87e2b5e37 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go @@ -0,0 +1,89 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package api + +import ( + "encoding/json" + "fmt" +) + +// BrokerDetail instance attributes +type BrokerDetail struct { + CN string `json:"cn"` + IP string `json:"ipaddress"` + MinVer int `json:"minimum_version_required"` + Modules []string `json:"modules"` + Port int `json:"port"` + Skew string `json:"skew"` + Status string `json:"status"` + Version int `json:"version"` +} + +// Broker definition +type Broker struct { + Cid string `json:"_cid"` + Details []BrokerDetail `json:"_details"` + Latitude string `json:"_latitude"` + Longitude string `json:"_longitude"` + Name string `json:"_name"` + Tags []string `json:"_tags"` + Type string `json:"_type"` +} + +// FetchBrokerByID fetch a broker configuration by [group]id +func (a *API) FetchBrokerByID(id IDType) (*Broker, error) { + cid := CIDType(fmt.Sprintf("/broker/%d", id)) + return a.FetchBrokerByCID(cid) +} + +// FetchBrokerByCID fetch a broker configuration by cid +func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { + result, err := a.Get(string(cid)) + if err != nil { + return nil, err + } + + response := new(Broker) + if err := json.Unmarshal(result, &response); err != nil { + return nil, err + } + + return response, nil + +} + +// FetchBrokerListByTag return list of brokers with a specific tag +func (a *API) FetchBrokerListByTag(searchTag SearchTagType) ([]Broker, error) { + query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", searchTag)) + return a.BrokerSearch(query) +} + +// BrokerSearch return a list of brokers matching a query/filter +func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) { + queryURL := fmt.Sprintf("/broker?%s", string(query)) + + result, err := a.Get(queryURL) + if err != nil { + return nil, err + } + + var brokers []Broker + json.Unmarshal(result, &brokers) + + return brokers, nil +} + +// FetchBrokerList return list of all brokers available to the api token/app +func (a *API) FetchBrokerList() ([]Broker, error) { + result, err := a.Get("/broker") + if err != nil { + return nil, err + } + + var response []Broker + json.Unmarshal(result, &response) + + return response, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go new file mode 100644 index 000000000..df0524f43 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go @@ -0,0 +1,113 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
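+//
+// Usage sketch (illustrative, not part of the upstream file: assumes an
+// initialized *API named apih; the URL is a placeholder in the
+// /module/httptrap/<uuid>/<secret> shape this file parses):
+//
+//	c, err := apih.FetchCheckBySubmissionURL(
+//		URLType("https://trap.example.com/module/httptrap/<uuid>/<secret>"))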
+ +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// CheckDetails is an arbitrary json structure, we would only care about submission_url +type CheckDetails struct { + SubmissionURL string `json:"submission_url"` +} + +// Check definition +type Check struct { + Cid string `json:"_cid"` + Active bool `json:"_active"` + BrokerCid string `json:"_broker"` + CheckBundleCid string `json:"_check_bundle"` + CheckUUID string `json:"_check_uuid"` + Details CheckDetails `json:"_details"` +} + +// FetchCheckByID fetch a check configuration by id +func (a *API) FetchCheckByID(id IDType) (*Check, error) { + cid := CIDType(fmt.Sprintf("/check/%d", int(id))) + return a.FetchCheckByCID(cid) +} + +// FetchCheckByCID fetch a check configuration by cid +func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) { + result, err := a.Get(string(cid)) + if err != nil { + return nil, err + } + + check := new(Check) + json.Unmarshal(result, check) + + return check, nil +} + +// FetchCheckBySubmissionURL fetch a check configuration by submission_url +func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) { + + u, err := url.Parse(string(submissionURL)) + if err != nil { + return nil, err + } + + // valid trap url: scheme://host[:port]/module/httptrap/UUID/secret + + // does it smell like a valid trap url path + if u.Path[:17] != "/module/httptrap/" { + return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL) + } + + // extract uuid/secret + pathParts := strings.Split(u.Path[17:len(u.Path)], "/") + if len(pathParts) != 2 { + return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL) + } + + uuid := pathParts[0] + + query := SearchQueryType(fmt.Sprintf("f__check_uuid=%s", uuid)) + + checks, err := a.CheckSearch(query) + if err != nil { + return nil, err + } + + if len(checks) == 0 { + return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid) + } + + numActive := 0 + checkID := -1 + + for idx, check := range checks { + if check.Active { + numActive++ + checkID = idx + } + } + + if numActive > 1 { + return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid) + } + + return &checks[checkID], nil + +} + +// CheckSearch returns a list of checks matching a query/filter +func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) { + queryURL := fmt.Sprintf("/check?%s", string(query)) + + result, err := a.Get(queryURL) + if err != nil { + return nil, err + } + + var checks []Check + json.Unmarshal(result, &checks) + + return checks, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go new file mode 100644 index 000000000..559df7ac0 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go @@ -0,0 +1,132 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
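+//
+// Usage sketch (illustrative, not part of the upstream file: assumes an
+// initialized *API named apih; the criteria string is a placeholder in the
+// search syntax linked from CheckBundleSearch below):
+//
+//	q := SearchQueryType(`(active:1)(type:"httptrap")`)
+//	bundles, err := apih.CheckBundleSearch(q)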
+ +package api + +import ( + "encoding/json" + "fmt" +) + +// CheckBundleConfig configuration specific to check type +type CheckBundleConfig struct { + AsyncMetrics bool `json:"async_metrics"` + Secret string `json:"secret"` + SubmissionURL string `json:"submission_url"` +} + +// CheckBundleMetric individual metric configuration +type CheckBundleMetric struct { + Name string `json:"name"` + Type string `json:"type"` + Units string `json:"units"` + Status string `json:"status"` +} + +// CheckBundle definition +type CheckBundle struct { + CheckUUIDs []string `json:"_check_uuids,omitempty"` + Checks []string `json:"_checks,omitempty"` + Cid string `json:"_cid,omitempty"` + Created int `json:"_created,omitempty"` + LastModified int `json:"_last_modified,omitempty"` + LastModifedBy string `json:"_last_modifed_by,omitempty"` + ReverseConnectUrls []string `json:"_reverse_connection_urls,omitempty"` + Brokers []string `json:"brokers"` + Config CheckBundleConfig `json:"config"` + DisplayName string `json:"display_name"` + Metrics []CheckBundleMetric `json:"metrics"` + MetricLimit int `json:"metric_limit"` + Notes string `json:"notes"` + Period int `json:"period"` + Status string `json:"status"` + Tags []string `json:"tags"` + Target string `json:"target"` + Timeout int `json:"timeout"` + Type string `json:"type"` +} + +// FetchCheckBundleByID fetch a check bundle configuration by id +func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) { + cid := CIDType(fmt.Sprintf("/check_bundle/%d", id)) + return a.FetchCheckBundleByCID(cid) +} + +// FetchCheckBundleByCID fetch a check bundle configuration by id +func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) { + result, err := a.Get(string(cid)) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + json.Unmarshal(result, checkBundle) + + return checkBundle, nil +} + +// CheckBundleSearch returns list of check bundles matching a search query +// - a search query not a filter (see: https://login.circonus.com/resources/api#searching) +func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) { + apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria) + + response, err := a.Get(apiPath) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var results []CheckBundle + err = json.Unmarshal(response, &results) + if err != nil { + return nil, fmt.Errorf("[ERROR] Parsing JSON response %+v", err) + } + + return results, nil +} + +// CreateCheckBundle create a new check bundle (check) +func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) { + cfgJSON, err := json.Marshal(config) + if err != nil { + return nil, err + } + + response, err := a.Post("/check_bundle", cfgJSON) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + err = json.Unmarshal(response, checkBundle) + if err != nil { + return nil, err + } + + return checkBundle, nil +} + +// UpdateCheckBundle updates a check bundle configuration +func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) { + if a.Debug { + a.Log.Printf("[DEBUG] Updating check bundle with new metrics.") + } + + cfgJSON, err := json.Marshal(config) + if err != nil { + return nil, err + } + + response, err := a.Put(config.Cid, cfgJSON) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + err = json.Unmarshal(response, checkBundle) + if err != nil { + return nil, err + } + + return checkBundle, nil +} diff --git 
a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go new file mode 100644 index 000000000..b23486ce2 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go @@ -0,0 +1,199 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checkmgr + +import ( + "fmt" + "math/rand" + "net" + "net/url" + "reflect" + "strings" + "time" + + "github.com/circonus-labs/circonus-gometrics/api" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// Get Broker to use when creating a check +func (cm *CheckManager) getBroker() (*api.Broker, error) { + if cm.brokerID != 0 { + broker, err := cm.apih.FetchBrokerByID(cm.brokerID) + if err != nil { + return nil, err + } + if !cm.isValidBroker(broker) { + return nil, fmt.Errorf( + "[ERROR] designated broker %d [%s] is invalid (not active, does not support required check type, or connectivity issue)", + cm.brokerID, + broker.Name) + } + return broker, nil + } + broker, err := cm.selectBroker() + if err != nil { + return nil, fmt.Errorf("[ERROR] Unable to fetch suitable broker %s", err) + } + return broker, nil +} + +// Get CN of Broker associated with submission_url to satisfy no IP SANS in certs +func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLType) (string, error) { + u, err := url.Parse(string(submissionURL)) + if err != nil { + return "", err + } + + hostParts := strings.Split(u.Host, ":") + host := hostParts[0] + + if net.ParseIP(host) == nil { // it's a non-ip string + return u.Host, nil + } + + cn := "" + + for _, detail := range broker.Details { + if detail.IP == host { + cn = detail.CN + break + } + } + + if cn == "" { + return "", fmt.Errorf("[ERROR] Unable to match URL host (%s) to Broker", u.Host) + } + + return cn, nil + +} + +// Select a broker for use when creating a check, if a specific broker +// was not specified. 
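+// The selection below boils down to (pseudocode; filter, containsEnterprise
+// and onlyEnterprise are illustrative helpers, not functions in this file):
+//
+//	valid := filter(brokerList, cm.isValidBroker)
+//	if containsEnterprise(valid) {
+//		valid = onlyEnterprise(valid)
+//	}
+//	return a random element of valid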
+func (cm *CheckManager) selectBroker() (*api.Broker, error) {
+	var brokerList []api.Broker
+	var err error
+
+	if cm.brokerSelectTag != "" {
+		brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		brokerList, err = cm.apih.FetchBrokerList()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if len(brokerList) == 0 {
+		return nil, fmt.Errorf("zero brokers found")
+	}
+
+	validBrokers := make(map[string]api.Broker)
+	haveEnterprise := false
+
+	for _, broker := range brokerList {
+		if cm.isValidBroker(&broker) {
+			validBrokers[broker.Cid] = broker
+			if broker.Type == "enterprise" {
+				haveEnterprise = true
+			}
+		}
+	}
+
+	if haveEnterprise { // eliminate non-enterprise brokers from valid brokers
+		for k, v := range validBrokers {
+			if v.Type != "enterprise" {
+				delete(validBrokers, k)
+			}
+		}
+	}
+
+	if len(validBrokers) == 0 {
+		return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList))
+	}
+
+	validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys()
+	selectedBroker := validBrokers[validBrokerKeys[rand.Intn(len(validBrokerKeys))].String()]
+
+	if cm.Debug {
+		cm.Log.Printf("[DEBUG] Selected broker '%s'\n", selectedBroker.Name)
+	}
+
+	return &selectedBroker, nil
+
+}
+
+// Verify broker supports the check type to be used
+func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool {
+
+	for _, module := range details.Modules {
+		if CheckTypeType(module) == checkType {
+			return true
+		}
+	}
+
+	return false
+
+}
+
+// Is the broker valid (active, supports check type, and reachable)
+func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
+	brokerPort := 0
+	valid := false
+	for _, detail := range broker.Details {
+		brokerPort = 43191
+
+		// broker must be active
+		if detail.Status != statusActive {
+			if cm.Debug {
+				cm.Log.Printf("[DEBUG] Broker '%s' is not active.\n", broker.Name)
+			}
+			continue
+		}
+
+		// broker must have module loaded for the check type to be used
+		if !cm.brokerSupportsCheckType(cm.checkType, &detail) {
+			if cm.Debug {
+				cm.Log.Printf("[DEBUG] Broker '%s' does not support '%s' checks.\n", broker.Name, cm.checkType)
+			}
+			continue
+		}
+
+		// broker must be reachable and respond within designated time
+		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.IP, brokerPort), cm.brokerMaxResponseTime)
+		if err != nil {
+			if detail.CN != "trap.noit.circonus.net" {
+				if cm.Debug {
+					cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err)
+				}
+				continue // not able to reach the broker (or response slow enough for it to be considered not usable)
+			}
+			// if circonus trap broker, try port 443
+			brokerPort = 443
+			conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.CN, brokerPort), cm.brokerMaxResponseTime)
+			if err != nil {
+				if cm.Debug {
+					cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err)
+				}
+				continue // not able to reach the broker on 443 either (or response slow enough for it to be considered not usable)
+			}
+		}
+		conn.Close()
+
+		if cm.Debug {
+			cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name)
+		}
+
+		valid = true
+		break
+
+	}
+	return valid
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go
new file mode 100644
index 000000000..3f173bf5c
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go
@@ -0,0 +1,87 @@
+// 
Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checkmgr + +import ( + "crypto/x509" + "encoding/json" + "fmt" +) + +// Default Circonus CA certificate +var circonusCA = []byte(`-----BEGIN CERTIFICATE----- +MIID4zCCA0ygAwIBAgIJAMelf8skwVWPMA0GCSqGSIb3DQEBBQUAMIGoMQswCQYD +VQQGEwJVUzERMA8GA1UECBMITWFyeWxhbmQxETAPBgNVBAcTCENvbHVtYmlhMRcw +FQYDVQQKEw5DaXJjb251cywgSW5jLjERMA8GA1UECxMIQ2lyY29udXMxJzAlBgNV +BAMTHkNpcmNvbnVzIENlcnRpZmljYXRlIEF1dGhvcml0eTEeMBwGCSqGSIb3DQEJ +ARYPY2FAY2lyY29udXMubmV0MB4XDTA5MTIyMzE5MTcwNloXDTE5MTIyMTE5MTcw +NlowgagxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMI +Q29sdW1iaWExFzAVBgNVBAoTDkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJj +b251czEnMCUGA1UEAxMeQ2lyY29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4w +HAYJKoZIhvcNAQkBFg9jYUBjaXJjb251cy5uZXQwgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBAKz2X0/0vJJ4ad1roehFyxUXHdkjJA9msEKwT2ojummdUB3kK5z6 +PDzDL9/c65eFYWqrQWVWZSLQK1D+v9xJThCe93v6QkSJa7GZkCq9dxClXVtBmZH3 +hNIZZKVC6JMA9dpRjBmlFgNuIdN7q5aJsv8VZHH+QrAyr9aQmhDJAmk1AgMBAAGj +ggERMIIBDTAdBgNVHQ4EFgQUyNTsgZHSkhhDJ5i+6IFlPzKYxsUwgd0GA1UdIwSB +1TCB0oAUyNTsgZHSkhhDJ5i+6IFlPzKYxsWhga6kgaswgagxCzAJBgNVBAYTAlVT +MREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQ29sdW1iaWExFzAVBgNVBAoT +DkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJjb251czEnMCUGA1UEAxMeQ2ly +Y29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9jYUBj +aXJjb251cy5uZXSCCQDHpX/LJMFVjzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB +BQUAA4GBAAHBtl15BwbSyq0dMEBpEdQYhHianU/rvOMe57digBmox7ZkPEbB/baE +sYJysziA2raOtRxVRtcxuZSMij2RiJDsLxzIp1H60Xhr8lmf7qF6Y+sZl7V36KZb +n2ezaOoRtsQl9dhqEMe8zgL76p9YZ5E69Al0mgiifTteyNjjMuIW +-----END CERTIFICATE-----`) + +// CACert contains cert returned from Circonus API +type CACert struct { + Contents string `json:"contents"` +} + +// loadCACert loads the CA cert for the broker designated by the submission url +func (cm *CheckManager) loadCACert() { + if cm.certPool != nil { + return + } + + cm.certPool = x509.NewCertPool() + + cert, err := cm.fetchCert() + if err != nil { + if cm.Debug { + cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err) + } + } + + if cert == nil { + cert = circonusCA + } + + cm.certPool.AppendCertsFromPEM(cert) +} + +// fetchCert fetches CA certificate using Circonus API +func (cm *CheckManager) fetchCert() ([]byte, error) { + if !cm.enabled { + return circonusCA, nil + } + + response, err := cm.apih.Get("/pki/ca.crt") + if err != nil { + return nil, err + } + + cadata := new(CACert) + err = json.Unmarshal(response, cadata) + if err != nil { + return nil, err + } + + if cadata.Contents == "" { + return nil, fmt.Errorf("[ERROR] Unable to find ca cert %+v", cadata) + } + + return []byte(cadata.Contents), nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go new file mode 100644 index 000000000..56f6fed28 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go @@ -0,0 +1,211 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checkmgr + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/circonus-labs/circonus-gometrics/api" +) + +// Initialize CirconusMetrics instance. Attempt to find a check otherwise create one. 
+// use cases: +// +// check [bundle] by submission url +// check [bundle] by *check* id (note, not check_bundle id) +// check [bundle] by search +// create check [bundle] +func (cm *CheckManager) initializeTrapURL() error { + if cm.trapURL != "" { + return nil + } + + cm.trapmu.Lock() + defer cm.trapmu.Unlock() + + if cm.checkSubmissionURL != "" { + if !cm.enabled { + cm.trapURL = cm.checkSubmissionURL + cm.trapLastUpdate = time.Now() + return nil + } + } + + if !cm.enabled { + return errors.New("Unable to initialize trap, check manager is disabled.") + } + + var err error + var check *api.Check + var checkBundle *api.CheckBundle + var broker *api.Broker + + if cm.checkSubmissionURL != "" { + check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL) + if err != nil { + return err + } + // extract check id from check object returned from looking up using submission url + // set m.CheckId to the id + // set m.SubmissionUrl to "" to prevent trying to search on it going forward + // use case: if the broker is changed in the UI metrics would stop flowing + // unless the new submission url can be fetched with the API (which is no + // longer possible using the original submission url) + var id int + id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1)) + if err == nil { + cm.checkID = api.IDType(id) + cm.checkSubmissionURL = "" + } else { + cm.Log.Printf( + "[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n", + check.Cid, err) + } + } else if cm.checkID > 0 { + check, err = cm.apih.FetchCheckByID(cm.checkID) + if err != nil { + return err + } + } else { + searchCriteria := fmt.Sprintf( + "(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)", + cm.checkInstanceID, cm.checkType, cm.checkSearchTag) + checkBundle, err = cm.checkBundleSearch(searchCriteria) + if err != nil { + return err + } + + if checkBundle == nil { + // err==nil && checkBundle==nil is "no check bundles matched" + // an error *should* be returned for any other invalid scenario + checkBundle, broker, err = cm.createNewCheck() + if err != nil { + return err + } + } + } + + if checkBundle == nil { + if check != nil { + checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid)) + if err != nil { + return err + } + } else { + return fmt.Errorf("[ERROR] Unable to retrieve, find, or create check") + } + } + + if broker == nil { + broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0])) + if err != nil { + return err + } + } + + // retain to facilitate metric management (adding new metrics specifically) + cm.checkBundle = checkBundle + cm.inventoryMetrics() + + // url to which metrics should be PUT + cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL) + + // used when sending as "ServerName" get around certs not having IP SANS + // (cert created with server name as CN but IP used in trap url) + cn, err := cm.getBrokerCN(broker, cm.trapURL) + if err != nil { + return err + } + cm.trapCN = BrokerCNType(cn) + + cm.trapLastUpdate = time.Now() + + return nil +} + +// Search for a check bundle given a predetermined set of criteria +func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) { + checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria)) + if err != nil { + return nil, err + } + + if len(checkBundles) == 0 { + return nil, nil // trigger creation of a new check + } + + numActive := 0 + checkID := -1 + + for idx, check := range checkBundles { + if check.Status == statusActive { + numActive++ + 
checkID = idx
+		}
+	}
+
+	if numActive > 1 {
+		return nil, fmt.Errorf("[ERROR] multiple check bundles match criteria %s", criteria)
+	}
+
+	return &checkBundles[checkID], nil
+}
+
+// Create a new check to receive metrics
+func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) {
+	checkSecret := string(cm.checkSecret)
+	if checkSecret == "" {
+		secret, err := cm.makeSecret()
+		if err != nil {
+			secret = "myS3cr3t"
+		}
+		checkSecret = secret
+	}
+
+	broker, err := cm.getBroker()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	config := api.CheckBundle{
+		Brokers:     []string{broker.Cid},
+		Config:      api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},
+		DisplayName: string(cm.checkDisplayName),
+		Metrics:     []api.CheckBundleMetric{},
+		MetricLimit: 0,
+		Notes:       "",
+		Period:      60,
+		Status:      statusActive,
+		Tags:        append([]string{string(cm.checkSearchTag)}, cm.checkTags...),
+		Target:      string(cm.checkInstanceID),
+		Timeout:     10,
+		Type:        string(cm.checkType),
+	}
+
+	checkBundle, err := cm.apih.CreateCheckBundle(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return checkBundle, broker, nil
+}
+
+// Create a dynamic secret to use with a new check
+func (cm *CheckManager) makeSecret() (string, error) {
+	hash := sha256.New()
+	x := make([]byte, 2048)
+	if _, err := rand.Read(x); err != nil {
+		return "", err
+	}
+	hash.Write(x)
+	return hex.EncodeToString(hash.Sum(nil))[0:16], nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
new file mode 100644
index 000000000..e23f00a57
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
@@ -0,0 +1,365 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package checkmgr provides a check management interface to circonus-gometrics
+package checkmgr
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/circonus-labs/circonus-gometrics/api"
+)
+
+// Check management offers:
+//
+// Create a check if one cannot be found matching specific criteria
+// Manage metrics in the supplied check (enabling new metrics as they are submitted)
+//
+// To disable check management, leave Config.Api.Token.Key blank
+//
+// use cases:
+// configure without api token - check management disabled
+//  - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored
+//  - note: SubmissionUrl is **required** in this case as there is no way to derive it w/o the api
+// configure with api token - check management enabled
+//  - all other configuration parameters affect how the trap url is obtained
+//    1. provided (Check.SubmissionUrl)
+//    2. via check lookup (CheckConfig.Id)
+//    3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag
+//    4. 
a new check is created
+
+const (
+	defaultCheckType             = "httptrap"
+	defaultTrapMaxURLAge         = "60s"   // 60 seconds
+	defaultBrokerMaxResponseTime = "500ms" // 500 milliseconds
+	defaultForceMetricActivation = "false"
+	statusActive                 = "active"
+)
+
+// CheckConfig options for check
+type CheckConfig struct {
+	// a specific submission url
+	SubmissionURL string
+	// a specific check id (not check bundle id)
+	ID string
+	// unique instance id string
+	// used to search for a check to use
+	// used as check.target when creating a check
+	InstanceID string
+	// unique check searching tag
+	// used to search for a check to use (combined with instanceid)
+	// used as a regular tag when creating a check
+	SearchTag string
+	// a custom display name for the check (as viewed in UI Checks)
+	DisplayName string
+	// httptrap check secret (for creating a check)
+	Secret string
+	// additional tags to add to a check (when creating a check)
+	// these tags will not be added to an existing check
+	Tags []string
+	// max amount of time to hold on to a submission url;
+	// when a given submission fails (due to retries) and the
+	// time the url was last updated is older than this, the trap
+	// url will be refreshed (e.g. if the broker is changed
+	// in the UI) **only relevant when check management is enabled**
+	// e.g. 5m, 30m, 1h, etc.
+	MaxURLAge string
+	// force metric activation - if a metric has been disabled via the UI
+	// the default behavior is to *not* re-activate the metric; this setting
+	// overrides the behavior and will re-activate the metric when it is
+	// encountered. "(true|false)", default "false"
+	ForceMetricActivation string
+}
+
+// BrokerConfig options for broker
+type BrokerConfig struct {
+	// a specific broker id (numeric portion of cid)
+	ID string
+	// a tag that can be used to select the 1-n candidate brokers from which
+	// one is chosen when creating a new check (e.g. datacenter:abc)
+	SelectTag string
+	// for a broker to be considered viable it must respond to a
+	// connection attempt within this amount of time e.g. 
200ms, 2s, 1m + MaxResponseTime string +} + +// Config options +type Config struct { + Log *log.Logger + Debug bool + + // Circonus API config + API api.Config + // Check specific configuration options + Check CheckConfig + // Broker specific configuration options + Broker BrokerConfig +} + +// CheckTypeType check type +type CheckTypeType string + +// CheckInstanceIDType check instance id +type CheckInstanceIDType string + +// CheckSecretType check secret +type CheckSecretType string + +// CheckTagsType check tags +type CheckTagsType []string + +// CheckDisplayNameType check display name +type CheckDisplayNameType string + +// BrokerCNType broker common name +type BrokerCNType string + +// CheckManager settings +type CheckManager struct { + enabled bool + Log *log.Logger + Debug bool + apih *api.API + + // check + checkType CheckTypeType + checkID api.IDType + checkInstanceID CheckInstanceIDType + checkSearchTag api.SearchTagType + checkSecret CheckSecretType + checkTags CheckTagsType + checkSubmissionURL api.URLType + checkDisplayName CheckDisplayNameType + forceMetricActivation bool + + // broker + brokerID api.IDType + brokerSelectTag api.SearchTagType + brokerMaxResponseTime time.Duration + + // state + checkBundle *api.CheckBundle + availableMetrics map[string]bool + trapURL api.URLType + trapCN BrokerCNType + trapLastUpdate time.Time + trapMaxURLAge time.Duration + trapmu sync.Mutex + certPool *x509.CertPool +} + +// Trap config +type Trap struct { + URL *url.URL + TLS *tls.Config +} + +// NewCheckManager returns a new check manager +func NewCheckManager(cfg *Config) (*CheckManager, error) { + + if cfg == nil { + return nil, errors.New("Invalid Check Manager configuration (nil).") + } + + cm := &CheckManager{ + enabled: false, + } + + cm.Debug = cfg.Debug + + cm.Log = cfg.Log + if cm.Log == nil { + if cm.Debug { + cm.Log = log.New(os.Stderr, "", log.LstdFlags) + } else { + cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) + } + } + + if cfg.Check.SubmissionURL != "" { + cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) + } + // Blank API Token *disables* check management + if cfg.API.TokenKey == "" { + if cm.checkSubmissionURL == "" { + return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).") + } + if err := cm.initializeTrapURL(); err != nil { + return nil, err + } + return cm, nil + } + + // enable check manager + + cm.enabled = true + + // initialize api handle + + cfg.API.Debug = cm.Debug + cfg.API.Log = cm.Log + + apih, err := api.NewAPI(&cfg.API) + if err != nil { + return nil, err + } + cm.apih = apih + + // initialize check related data + + cm.checkType = defaultCheckType + + idSetting := "0" + if cfg.Check.ID != "" { + idSetting = cfg.Check.ID + } + id, err := strconv.Atoi(idSetting) + if err != nil { + return nil, err + } + cm.checkID = api.IDType(id) + + cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID) + cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName) + cm.checkSearchTag = api.SearchTagType(cfg.Check.SearchTag) + cm.checkSecret = CheckSecretType(cfg.Check.Secret) + cm.checkTags = cfg.Check.Tags + + fma := defaultForceMetricActivation + if cfg.Check.ForceMetricActivation != "" { + fma = cfg.Check.ForceMetricActivation + } + fm, err := strconv.ParseBool(fma) + if err != nil { + return nil, err + } + cm.forceMetricActivation = fm + + _, an := path.Split(os.Args[0]) + hn, err := os.Hostname() + if err != nil { + hn = "unknown" + } + if cm.checkInstanceID == "" { + cm.checkInstanceID = 
CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an)) + } + + if cm.checkSearchTag == "" { + cm.checkSearchTag = api.SearchTagType(fmt.Sprintf("service:%s", an)) + } + + if cm.checkDisplayName == "" { + cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s /cgm", string(cm.checkInstanceID))) + } + + dur := cfg.Check.MaxURLAge + if dur == "" { + dur = defaultTrapMaxURLAge + } + maxDur, err := time.ParseDuration(dur) + if err != nil { + return nil, err + } + cm.trapMaxURLAge = maxDur + + // setup broker + + idSetting = "0" + if cfg.Broker.ID != "" { + idSetting = cfg.Broker.ID + } + id, err = strconv.Atoi(idSetting) + if err != nil { + return nil, err + } + cm.brokerID = api.IDType(id) + + cm.brokerSelectTag = api.SearchTagType(cfg.Broker.SelectTag) + + dur = cfg.Broker.MaxResponseTime + if dur == "" { + dur = defaultBrokerMaxResponseTime + } + maxDur, err = time.ParseDuration(dur) + if err != nil { + return nil, err + } + cm.brokerMaxResponseTime = maxDur + + // metrics + cm.availableMetrics = make(map[string]bool) + + if err := cm.initializeTrapURL(); err != nil { + return nil, err + } + + return cm, nil +} + +// GetTrap return the trap url +func (cm *CheckManager) GetTrap() (*Trap, error) { + if cm.trapURL == "" { + if err := cm.initializeTrapURL(); err != nil { + return nil, err + } + } + + trap := &Trap{} + + u, err := url.Parse(string(cm.trapURL)) + if err != nil { + return nil, err + } + + trap.URL = u + + if u.Scheme == "https" { + if cm.certPool == nil { + cm.loadCACert() + } + t := &tls.Config{ + RootCAs: cm.certPool, + } + if cm.trapCN != "" { + t.ServerName = string(cm.trapCN) + } + trap.TLS = t + } + + return trap, nil +} + +// ResetTrap URL, force request to the API for the submission URL and broker ca cert +func (cm *CheckManager) ResetTrap() error { + if cm.trapURL == "" { + return nil + } + + cm.trapURL = "" + cm.certPool = nil + err := cm.initializeTrapURL() + return err +} + +// RefreshTrap check when the last time the URL was reset, reset if needed +func (cm *CheckManager) RefreshTrap() { + if cm.trapURL == "" { + return + } + + if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge { + cm.ResetTrap() + } +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go new file mode 100644 index 000000000..8f03d4476 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go @@ -0,0 +1,79 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
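+//
+// ActivateMetric's decision table, for reference (derived from the code
+// below):
+//
+//	metric unknown         -> activate
+//	metric known, active   -> leave as-is
+//	metric known, inactive -> activate only when forceMetricActivation is set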
+
+package checkmgr
+
+import (
+	"github.com/circonus-labs/circonus-gometrics/api"
+)
+
+// IsMetricActive checks whether a given metric name is currently active (enabled)
+func (cm *CheckManager) IsMetricActive(name string) bool {
+	active := cm.availableMetrics[name]
+	return active
+}
+
+// ActivateMetric determines if a given metric should be activated
+func (cm *CheckManager) ActivateMetric(name string) bool {
+	active, exists := cm.availableMetrics[name]
+
+	if !exists {
+		return true
+	}
+
+	if !active && cm.forceMetricActivation {
+		return true
+	}
+
+	return false
+}
+
+// AddNewMetrics updates a check bundle with new metrics
+func (cm *CheckManager) AddNewMetrics(newMetrics map[string]*api.CheckBundleMetric) {
+	// only if check manager is enabled
+	if !cm.enabled {
+		return
+	}
+
+	// only if checkBundle has been populated
+	if cm.checkBundle == nil {
+		return
+	}
+
+	newCheckBundle := cm.checkBundle
+	numCurrMetrics := len(newCheckBundle.Metrics)
+	numNewMetrics := len(newMetrics)
+
+	if numCurrMetrics+numNewMetrics >= cap(newCheckBundle.Metrics) {
+		nm := make([]api.CheckBundleMetric, numCurrMetrics+numNewMetrics)
+		copy(nm, newCheckBundle.Metrics)
+		newCheckBundle.Metrics = nm
+	}
+
+	newCheckBundle.Metrics = newCheckBundle.Metrics[0 : numCurrMetrics+numNewMetrics]
+
+	i := 0
+	for _, metric := range newMetrics {
+		newCheckBundle.Metrics[numCurrMetrics+i] = *metric
+		i++
+	}
+
+	checkBundle, err := cm.apih.UpdateCheckBundle(newCheckBundle)
+	if err != nil {
+		cm.Log.Printf("[ERROR] updating check bundle with new metrics %v", err)
+		return
+	}
+
+	cm.checkBundle = checkBundle
+	cm.inventoryMetrics()
+}
+
+// inventoryMetrics creates list of active metrics in check bundle
+func (cm *CheckManager) inventoryMetrics() {
+	availableMetrics := make(map[string]bool)
+	for _, metric := range cm.checkBundle.Metrics {
+		availableMetrics[metric.Name] = metric.Status == "active"
+	}
+	cm.availableMetrics = availableMetrics
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
new file mode 100644
index 000000000..34166c007
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
@@ -0,0 +1,254 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package circonusgometrics provides instrumentation for your applications in the form
+// of counters, gauges and histograms and allows you to publish them to
+// Circonus.
+//
+// Counters
+//
+// A counter is a monotonically-increasing, unsigned, 64-bit integer used to
+// represent the number of times an event has occurred. By tracking the deltas
+// between measurements of a counter over intervals of time, an aggregation
+// layer can derive rates, acceleration, etc.
+//
+// Gauges
+//
+// A gauge returns instantaneous measurements of something using signed, 64-bit
+// integers. This value does not need to be monotonic.
+//
+// Histograms
+//
+// A histogram tracks the distribution of a stream of values (e.g. the number of
+// seconds it takes to handle requests). Circonus can calculate complex
+// analytics on these.
+//
+// Reporting
+//
+// A periodic push to a Circonus httptrap is configurable.
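+//
+// A minimal usage sketch (the token is a placeholder and error handling is
+// reduced to log.Fatal):
+//
+//	cfg := &circonusgometrics.Config{}
+//	cfg.CheckManager.API.TokenKey = "<api-token>"
+//	metrics, err := circonusgometrics.NewCirconusMetrics(cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	metrics.Start()
+//	metrics.Increment("requests")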
+package circonusgometrics
+
+import (
+	"errors"
+	"io/ioutil"
+	"log"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/circonus-labs/circonus-gometrics/api"
+	"github.com/circonus-labs/circonus-gometrics/checkmgr"
+)
+
+const (
+	defaultFlushInterval = "10s" // 10 * time.Second
+)
+
+// Config options for circonus-gometrics
+type Config struct {
+	Log   *log.Logger
+	Debug bool
+
+	// API, Check and Broker configuration options
+	CheckManager checkmgr.Config
+
+	// how frequently to submit metrics to Circonus, default 10 seconds
+	Interval string
+}
+
+// CirconusMetrics state
+type CirconusMetrics struct {
+	Log           *log.Logger
+	Debug         bool
+	flushInterval time.Duration
+	flushing      bool
+	flushmu       sync.Mutex
+	check         *checkmgr.CheckManager
+
+	counters map[string]uint64
+	cm       sync.Mutex
+
+	counterFuncs map[string]func() uint64
+	cfm          sync.Mutex
+
+	gauges map[string]string
+	gm     sync.Mutex
+
+	gaugeFuncs map[string]func() int64
+	gfm        sync.Mutex
+
+	histograms map[string]*Histogram
+	hm         sync.Mutex
+
+	text map[string]string
+	tm   sync.Mutex
+
+	textFuncs map[string]func() string
+	tfm       sync.Mutex
+}
+
+// NewCirconusMetrics returns a CirconusMetrics instance
+func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
+
+	if cfg == nil {
+		return nil, errors.New("Invalid configuration (nil).")
+	}
+
+	cm := &CirconusMetrics{
+		counters:     make(map[string]uint64),
+		counterFuncs: make(map[string]func() uint64),
+		gauges:       make(map[string]string),
+		gaugeFuncs:   make(map[string]func() int64),
+		histograms:   make(map[string]*Histogram),
+		text:         make(map[string]string),
+		textFuncs:    make(map[string]func() string),
+	}
+
+	cm.Debug = cfg.Debug
+	if cm.Debug {
+		if cfg.Log == nil {
+			cm.Log = log.New(os.Stderr, "", log.LstdFlags)
+		} else {
+			cm.Log = cfg.Log
+		}
+	}
+	if cm.Log == nil {
+		cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+	}
+
+	fi := defaultFlushInterval
+	if cfg.Interval != "" {
+		fi = cfg.Interval
+	}
+
+	dur, err := time.ParseDuration(fi)
+	if err != nil {
+		return nil, err
+	}
+	cm.flushInterval = dur
+
+	cfg.CheckManager.Debug = cm.Debug
+	cfg.CheckManager.Log = cm.Log
+
+	check, err := checkmgr.NewCheckManager(&cfg.CheckManager)
+	if err != nil {
+		return nil, err
+	}
+	cm.check = check
+
+	if _, err := cm.check.GetTrap(); err != nil {
+		return nil, err
+	}
+
+	return cm, nil
+}
+
+// Start initializes the CirconusMetrics instance based on
+// configuration settings and sets the httptrap check url to
+// which metrics should be sent. It then starts a periodic
+// submission process of all metrics collected.
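+// Start is typically called once, immediately after NewCirconusMetrics; the
+// goroutine below simply invokes Flush every flushInterval.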
+func (m *CirconusMetrics) Start() { + go func() { + for _ = range time.NewTicker(m.flushInterval).C { + m.Flush() + } + }() +} + +// Flush metrics kicks off the process of sending metrics to Circonus +func (m *CirconusMetrics) Flush() { + if m.flushing { + return + } + m.flushmu.Lock() + m.flushing = true + m.flushmu.Unlock() + + if m.Debug { + m.Log.Println("[DEBUG] Flushing metrics") + } + + // check for new metrics and enable them automatically + newMetrics := make(map[string]*api.CheckBundleMetric) + + counters, gauges, histograms, text := m.snapshot() + output := make(map[string]interface{}) + for name, value := range counters { + send := m.check.IsMetricActive(name) + if !send && m.check.ActivateMetric(name) { + send = true + newMetrics[name] = &api.CheckBundleMetric{ + Name: name, + Type: "numeric", + Status: "active", + } + } + if send { + output[name] = map[string]interface{}{ + "_type": "n", + "_value": value, + } + } + } + + for name, value := range gauges { + send := m.check.IsMetricActive(name) + if !send && m.check.ActivateMetric(name) { + send = true + newMetrics[name] = &api.CheckBundleMetric{ + Name: name, + Type: "numeric", + Status: "active", + } + } + if send { + output[name] = map[string]interface{}{ + "_type": "n", + "_value": value, + } + } + } + + for name, value := range histograms { + send := m.check.IsMetricActive(name) + if !send && m.check.ActivateMetric(name) { + send = true + newMetrics[name] = &api.CheckBundleMetric{ + Name: name, + Type: "histogram", + Status: "active", + } + } + if send { + output[name] = map[string]interface{}{ + "_type": "n", + "_value": value.DecStrings(), + } + } + } + + for name, value := range text { + send := m.check.IsMetricActive(name) + if !send && m.check.ActivateMetric(name) { + send = true + newMetrics[name] = &api.CheckBundleMetric{ + Name: name, + Type: "text", + Status: "active", + } + } + if send { + output[name] = map[string]interface{}{ + "_type": "s", + "_value": value, + } + } + } + + m.submit(output, newMetrics) + + m.flushmu.Lock() + m.flushing = false + m.flushmu.Unlock() +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go new file mode 100644 index 000000000..2b34961f1 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go @@ -0,0 +1,55 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package circonusgometrics + +// A Counter is a monotonically increasing unsigned integer. +// +// Use a counter to derive rates (e.g., record total number of requests, derive +// requests per second). 
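+//
+// For example:
+//
+//	m.Increment("requests")             // counters["requests"]++
+//	m.IncrementByValue("bytes_in", 512) // counters["bytes_in"] += 512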
+ +// Increment counter by 1 +func (m *CirconusMetrics) Increment(metric string) { + m.Add(metric, 1) +} + +// IncrementByValue updates counter by supplied value +func (m *CirconusMetrics) IncrementByValue(metric string, val uint64) { + m.Add(metric, val) +} + +// Set a counter to specific value +func (m *CirconusMetrics) Set(metric string, val uint64) { + m.cm.Lock() + defer m.cm.Unlock() + m.counters[metric] = val +} + +// Add updates counter by supplied value +func (m *CirconusMetrics) Add(metric string, val uint64) { + m.cm.Lock() + defer m.cm.Unlock() + m.counters[metric] += val +} + +// RemoveCounter removes the named counter +func (m *CirconusMetrics) RemoveCounter(metric string) { + m.cm.Lock() + defer m.cm.Unlock() + delete(m.counters, metric) +} + +// SetCounterFunc set counter to a function [called at flush interval] +func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) { + m.cfm.Lock() + defer m.cfm.Unlock() + m.counterFuncs[metric] = fn +} + +// RemoveCounterFunc removes the named counter function +func (m *CirconusMetrics) RemoveCounterFunc(metric string) { + m.cfm.Lock() + defer m.cfm.Unlock() + delete(m.counterFuncs, metric) +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go new file mode 100644 index 000000000..b44236959 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go @@ -0,0 +1,81 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package circonusgometrics + +// A Gauge is an instantaneous measurement of a value. +// +// Use a gauge to track metrics which increase and decrease (e.g., amount of +// free memory). 
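+//
+// For example (freeBytes and queueDepth are illustrative caller-side values,
+// not part of this package):
+//
+//	m.SetGauge("memory`free_bytes", freeBytes)
+//	m.SetGaugeFunc("queue`depth", func() int64 { return queueDepth() })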
+ +import ( + "fmt" +) + +// Gauge sets a gauge to a value +func (m *CirconusMetrics) Gauge(metric string, val interface{}) { + m.SetGauge(metric, val) +} + +// SetGauge sets a gauge to a value +func (m *CirconusMetrics) SetGauge(metric string, val interface{}) { + m.gm.Lock() + defer m.gm.Unlock() + m.gauges[metric] = m.gaugeValString(val) +} + +// RemoveGauge removes a gauge +func (m *CirconusMetrics) RemoveGauge(metric string) { + m.gm.Lock() + defer m.gm.Unlock() + delete(m.gauges, metric) +} + +// SetGaugeFunc sets a gauge to a function [called at flush interval] +func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) { + m.gfm.Lock() + defer m.gfm.Unlock() + m.gaugeFuncs[metric] = fn +} + +// RemoveGaugeFunc removes a gauge function +func (m *CirconusMetrics) RemoveGaugeFunc(metric string) { + m.gfm.Lock() + defer m.gfm.Unlock() + delete(m.gaugeFuncs, metric) +} + +// gaugeValString converts an interface value (of a supported type) to a string +func (m *CirconusMetrics) gaugeValString(val interface{}) string { + vs := "" + switch v := val.(type) { + default: + // ignore it, unsupported type + case int: + vs = fmt.Sprintf("%d", v) + case int8: + vs = fmt.Sprintf("%d", v) + case int16: + vs = fmt.Sprintf("%d", v) + case int32: + vs = fmt.Sprintf("%d", v) + case int64: + vs = fmt.Sprintf("%d", v) + case uint: + vs = fmt.Sprintf("%d", v) + case uint8: + vs = fmt.Sprintf("%d", v) + case uint16: + vs = fmt.Sprintf("%d", v) + case uint32: + vs = fmt.Sprintf("%d", v) + case uint64: + vs = fmt.Sprintf("%d", v) + case float32: + vs = fmt.Sprintf("%f", v) + case float64: + vs = fmt.Sprintf("%f", v) + } + return vs +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go new file mode 100644 index 000000000..59d19e20f --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go @@ -0,0 +1,77 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package circonusgometrics + +import ( + "sync" + + "github.com/circonus-labs/circonusllhist" +) + +// Histogram measures the distribution of a stream of values. +type Histogram struct { + name string + hist *circonusllhist.Histogram + rw sync.RWMutex +} + +// Timing adds a value to a histogram +func (m *CirconusMetrics) Timing(metric string, val float64) { + m.SetHistogramValue(metric, val) +} + +// RecordValue adds a value to a histogram +func (m *CirconusMetrics) RecordValue(metric string, val float64) { + m.SetHistogramValue(metric, val) +} + +// SetHistogramValue adds a value to a histogram +func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) { + m.NewHistogram(metric) + + m.histograms[metric].rw.Lock() + defer m.histograms[metric].rw.Unlock() + + m.histograms[metric].hist.RecordValue(val) +} + +// RemoveHistogram removes a histogram +func (m *CirconusMetrics) RemoveHistogram(metric string) { + m.hm.Lock() + defer m.hm.Unlock() + delete(m.histograms, metric) +} + +// NewHistogram returns a histogram instance. 
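+// It doubles as a lookup: calling it again with the same metric name returns
+// the existing instance. For example:
+//
+//	h := m.NewHistogram("api`latency")
+//	h.RecordValue(0.023) // seconds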
+func (m *CirconusMetrics) NewHistogram(metric string) *Histogram {
+	m.hm.Lock()
+	defer m.hm.Unlock()
+
+	if hist, ok := m.histograms[metric]; ok {
+		return hist
+	}
+
+	hist := &Histogram{
+		name: metric,
+		hist: circonusllhist.New(),
+	}
+
+	m.histograms[metric] = hist
+
+	return hist
+}
+
+// Name returns the name from a histogram instance
+func (h *Histogram) Name() string {
+	return h.name
+}
+
+// RecordValue records the given value to a histogram instance
+func (h *Histogram) RecordValue(v float64) {
+	h.rw.Lock()
+	defer h.rw.Unlock()
+
+	h.hist.RecordValue(v)
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go
new file mode 100644
index 000000000..abeb73006
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go
@@ -0,0 +1,126 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package circonusgometrics
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/circonus-labs/circonus-gometrics/api"
+	"github.com/hashicorp/go-retryablehttp"
+)
+
+func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) {
+	if len(newMetrics) > 0 {
+		m.check.AddNewMetrics(newMetrics)
+	}
+
+	str, err := json.Marshal(output)
+	if err != nil {
+		m.Log.Printf("[ERROR] marshaling output %+v", err)
+		return
+	}
+
+	numStats, err := m.trapCall(str)
+	if err != nil {
+		m.Log.Printf("[ERROR] %+v\n", err)
+		return
+	}
+
+	if m.Debug {
+		m.Log.Printf("[DEBUG] %d stats sent\n", numStats)
+	}
+}
+
+func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
+	trap, err := m.check.GetTrap()
+	if err != nil {
+		return 0, err
+	}
+
+	dataReader := bytes.NewReader(payload)
+
+	req, err := retryablehttp.NewRequest("PUT", trap.URL.String(), dataReader)
+	if err != nil {
+		return 0, err
+	}
+	req.Header.Add("Accept", "application/json")
+
+	client := retryablehttp.NewClient()
+	if trap.URL.Scheme == "https" {
+		client.HTTPClient.Transport = &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			TLSHandshakeTimeout: 10 * time.Second,
+			TLSClientConfig:     trap.TLS,
+			DisableKeepAlives:   true,
+			MaxIdleConnsPerHost: -1,
+			DisableCompression:  true,
+		}
+	} else {
+		client.HTTPClient.Transport = &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			TLSHandshakeTimeout: 10 * time.Second,
+			DisableKeepAlives:   true,
+			MaxIdleConnsPerHost: -1,
+			DisableCompression:  true,
+		}
+	}
+	client.RetryWaitMin = 10 * time.Millisecond
+	client.RetryWaitMax = 50 * time.Millisecond
+	client.RetryMax = 3
+	client.Logger = m.Log
+
+	attempts := -1
+	client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
+		attempts = retryNumber
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		if attempts == client.RetryMax {
+			m.check.RefreshTrap()
+		}
+		return 0, err
+	}
+
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err)
+	}
+
+	var response map[string]interface{}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		m.Log.Printf("[ERROR] parsing body, proceeding. 
%s\n", err)
+	}
+
+	if resp.StatusCode != 200 {
+		return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode))
+	}
+	switch v := response["stats"].(type) {
+	case float64:
+		return int(v), nil
+	case int:
+		return v, nil
+	default:
+	}
+	return 0, errors.New("[ERROR] bad response type")
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/text.go b/vendor/github.com/circonus-labs/circonus-gometrics/text.go
new file mode 100644
index 000000000..eb2d12a87
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/text.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package circonusgometrics
+
+// A Text metric is an arbitrary string
+//
+
+// SetText sets a text metric
+func (m *CirconusMetrics) SetText(metric string, val string) {
+	m.SetTextValue(metric, val)
+}
+
+// SetTextValue sets a text metric
+func (m *CirconusMetrics) SetTextValue(metric string, val string) {
+	m.tm.Lock()
+	defer m.tm.Unlock()
+	m.text[metric] = val
+}
+
+// RemoveText removes a text metric
+func (m *CirconusMetrics) RemoveText(metric string) {
+	m.tm.Lock()
+	defer m.tm.Unlock()
+	delete(m.text, metric)
+}
+
+// SetTextFunc sets a text metric to a function [called at flush interval]
+func (m *CirconusMetrics) SetTextFunc(metric string, fn func() string) {
+	m.tfm.Lock()
+	defer m.tfm.Unlock()
+	m.textFuncs[metric] = fn
+}
+
+// RemoveTextFunc removes a text metric function
+func (m *CirconusMetrics) RemoveTextFunc(metric string) {
+	m.tfm.Lock()
+	defer m.tfm.Unlock()
+	delete(m.textFuncs, metric)
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go
new file mode 100644
index 000000000..73259a7b1
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package circonusgometrics
+
+import (
+	"net/http"
+	"time"
+)
+
+// TrackHTTPLatency wraps Handler functions registered with an http.ServeMux, tracking latencies.
+// Metrics are of the form go`HTTP`<method>`<name>`latency and are tracked in a histogram in units
+// of seconds (as a float64) providing nanosecond granularity.
+func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+	return func(rw http.ResponseWriter, req *http.Request) {
+		start := time.Now().UnixNano()
+		handler(rw, req)
+		elapsed := time.Now().UnixNano() - start
+		m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second))
+	}
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go
new file mode 100644
index 000000000..d5cc7d56c
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/util.go
@@ -0,0 +1,100 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package circonusgometrics
+
+import (
+	"github.com/circonus-labs/circonusllhist"
+)
+
+// Reset removes all existing counters, gauges, histograms and text metrics.
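+// A typical use is wiping state between test cases:
+//
+//	m.Reset() // every metric map is re-created empty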
+func (m *CirconusMetrics) Reset() { + m.cm.Lock() + defer m.cm.Unlock() + + m.cfm.Lock() + defer m.cfm.Unlock() + + m.gm.Lock() + defer m.gm.Unlock() + + m.gfm.Lock() + defer m.gfm.Unlock() + + m.hm.Lock() + defer m.hm.Unlock() + + m.tm.Lock() + defer m.tm.Unlock() + + m.tfm.Lock() + defer m.tfm.Unlock() + + m.counters = make(map[string]uint64) + m.counterFuncs = make(map[string]func() uint64) + m.gauges = make(map[string]string) + m.gaugeFuncs = make(map[string]func() int64) + m.histograms = make(map[string]*Histogram) + m.text = make(map[string]string) + m.textFuncs = make(map[string]func() string) +} + +// snapshot returns a copy of the values of all registered counters and gauges. +func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { + m.cm.Lock() + defer m.cm.Unlock() + + m.cfm.Lock() + defer m.cfm.Unlock() + + m.gm.Lock() + defer m.gm.Unlock() + + m.gfm.Lock() + defer m.gfm.Unlock() + + m.hm.Lock() + defer m.hm.Unlock() + + m.tm.Lock() + defer m.tm.Unlock() + + m.tfm.Lock() + defer m.tfm.Unlock() + + c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) + for n, v := range m.counters { + c[n] = v + } + + for n, f := range m.counterFuncs { + c[n] = f() + } + + //g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs)) + g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) + for n, v := range m.gauges { + g[n] = v + } + + for n, f := range m.gaugeFuncs { + g[n] = m.gaugeValString(f()) + } + + h = make(map[string]*circonusllhist.Histogram, len(m.histograms)) + for n, hist := range m.histograms { + h[n] = hist.hist.CopyAndReset() + } + + t = make(map[string]string, len(m.text)+len(m.textFuncs)) + for n, v := range m.text { + t[n] = v + } + + for n, f := range m.textFuncs { + t[n] = f() + } + + return +} diff --git a/vendor/github.com/circonus-labs/circonusllhist/LICENSE b/vendor/github.com/circonus-labs/circonusllhist/LICENSE new file mode 100644 index 000000000..dc014a4ac --- /dev/null +++ b/vendor/github.com/circonus-labs/circonusllhist/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2016 Circonus, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name Circonus, Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go b/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go new file mode 100644 index 000000000..cf4f482c1 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go @@ -0,0 +1,555 @@ +// Copyright 2016, Circonus, Inc. All rights reserved. +// See the LICENSE file. + +// Package circllhist provides an implementation of Circonus' fixed log-linear +// histogram data structure. This allows tracking of histograms in a +// composable way such that accurate error can be reasoned about. +package circonusllhist + +import ( + "bytes" + "errors" + "fmt" + "math" + "sync" +) + +const ( + DEFAULT_HIST_SIZE = int16(100) +) + +var power_of_ten = [...]float64{ + 1, 10, 100, 1000, 10000, 100000, 1e+06, 1e+07, 1e+08, 1e+09, 1e+10, + 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, + 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, + 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, + 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, + 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, + 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, + 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, + 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, + 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, + 1e+101, 1e+102, 1e+103, 1e+104, 1e+105, 1e+106, 1e+107, 1e+108, 1e+109, + 1e+110, 1e+111, 1e+112, 1e+113, 1e+114, 1e+115, 1e+116, 1e+117, 1e+118, + 1e+119, 1e+120, 1e+121, 1e+122, 1e+123, 1e+124, 1e+125, 1e+126, 1e+127, + 1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120, + 1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111, + 1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, 1e-102, + 1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96, + 1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86, + 1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76, + 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66, + 1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, + 1e-55, 1e-54, 1e-53, 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, + 1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, + 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26, + 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17, 1e-16, + 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-09, 1e-08, 1e-07, 1e-06, + 1e-05, 0.0001, 0.001, 0.01, 0.1, +} + +// A Bracket is a part of a cumulative distribution. 
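+// A Bin is one log-linear bucket of such a distribution: val holds the two
+// most significant decimal digits of the recorded sample (10..99 in absolute
+// value, 0 for exact zero), exp holds the power-of-ten exponent, and count
+// holds the number of samples that fell into the bucket.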
+type Bin struct { + val int8 + exp int8 + count uint64 +} + +func NewBinRaw(val int8, exp int8, count uint64) *Bin { + return &Bin{ + val: val, + exp: exp, + count: count, + } +} +func NewBin() *Bin { + return NewBinRaw(0, 0, 0) +} +func NewBinFromFloat64(d float64) *Bin { + hb := NewBinRaw(0, 0, 0) + hb.SetFromFloat64(d) + return hb +} +func (hb *Bin) SetFromFloat64(d float64) *Bin { + hb.val = -1 + if math.IsInf(d, 0) || math.IsNaN(d) { + return hb + } + if d == 0.0 { + hb.val = 0 + return hb + } + sign := 1 + if math.Signbit(d) { + sign = -1 + } + d = math.Abs(d) + big_exp := int(math.Floor(math.Log10(d))) + hb.exp = int8(big_exp) + if int(hb.exp) != big_exp { //rolled + hb.exp = 0 + if big_exp < 0 { + hb.val = 0 + } + return hb + } + d = d / hb.PowerOfTen() + d = d * 10 + hb.val = int8(sign * int(math.Floor(d+1e-13))) + if hb.val == 100 || hb.val == -100 { + if hb.exp < 127 { + hb.val = hb.val / 10 + hb.exp++ + } else { + hb.val = 0 + hb.exp = 0 + } + } + if hb.val == 0 { + hb.exp = 0 + return hb + } + if !((hb.val >= 10 && hb.val < 100) || + (hb.val <= -10 && hb.val > -100)) { + hb.val = -1 + hb.exp = 0 + } + return hb +} +func (hb *Bin) PowerOfTen() float64 { + idx := int(hb.exp) + if idx < 0 { + idx = 256 + idx + } + return power_of_ten[idx] +} + +func (hb *Bin) IsNaN() bool { + if hb.val > 99 || hb.val < -99 { + return true + } + return false +} +func (hb *Bin) Val() int8 { + return hb.val +} +func (hb *Bin) Exp() int8 { + return hb.exp +} +func (hb *Bin) Count() uint64 { + return hb.count +} +func (hb *Bin) Value() float64 { + if hb.IsNaN() { + return math.NaN() + } + if hb.val < 10 && hb.val > -10 { + return 0.0 + } + return (float64(hb.val) / 10.0) * hb.PowerOfTen() +} +func (hb *Bin) BinWidth() float64 { + if hb.IsNaN() { + return math.NaN() + } + if hb.val < 10 && hb.val > -10 { + return 0.0 + } + return hb.PowerOfTen() / 10.0 +} +func (hb *Bin) Midpoint() float64 { + if hb.IsNaN() { + return math.NaN() + } + out := hb.Value() + if out == 0 { + return 0 + } + interval := hb.BinWidth() + if out < 0 { + interval = interval * -1 + } + return out + interval/2.0 +} +func (hb *Bin) Left() float64 { + if hb.IsNaN() { + return math.NaN() + } + out := hb.Value() + if out >= 0 { + return out + } + return out - hb.BinWidth() +} + +func (h1 *Bin) Compare(h2 *Bin) int { + if h1.val == h2.val && h1.exp == h2.exp { + return 0 + } + if h1.val == -1 { + return 1 + } + if h2.val == -1 { + return -1 + } + if h1.val == 0 { + if h2.val > 0 { + return 1 + } + return -1 + } + if h2.val == 0 { + if h1.val < 0 { + return 1 + } + return -1 + } + if h1.val < 0 && h2.val > 0 { + return 1 + } + if h1.val > 0 && h2.val < 0 { + return -1 + } + if h1.exp == h2.exp { + if h1.val < h2.val { + return 1 + } + return -1 + } + if h1.exp > h2.exp { + if h1.val < 0 { + return 1 + } + return -1 + } + if h1.exp < h2.exp { + if h1.val < 0 { + return -1 + } + return 1 + } + return 0 +} + +// This histogram structure tracks values are two decimal digits of precision +// with a bounded error that remains bounded upon composition +type Histogram struct { + mutex sync.Mutex + bvs []Bin + used int16 + allocd int16 +} + +// New returns a new Histogram +func New() *Histogram { + return &Histogram{ + allocd: DEFAULT_HIST_SIZE, + used: 0, + bvs: make([]Bin, DEFAULT_HIST_SIZE), + } +} + +// Max returns the approximate maximum recorded value. +func (h *Histogram) Max() float64 { + return h.ValueAtQuantile(1.0) +} + +// Min returns the approximate minimum recorded value. 
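+// Like Max, it is derived from ValueAtQuantile, so it is exact only to the
+// resolution of the bin that contains the extreme sample.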
+func (h *Histogram) Min() float64 { + return h.ValueAtQuantile(0.0) +} + +// Mean returns the approximate arithmetic mean of the recorded values. +func (h *Histogram) Mean() float64 { + return h.ApproxMean() +} + +// Reset forgets all bins in the histogram (they remain allocated) +func (h *Histogram) Reset() { + h.mutex.Lock() + h.used = 0 + h.mutex.Unlock() +} + +// RecordValue records the given value, returning an error if the value is out +// of range. +func (h *Histogram) RecordValue(v float64) error { + return h.RecordValues(v, 1) +} + +// RecordCorrectedValue records the given value, correcting for stalls in the +// recording process. This only works for processes which are recording values +// at an expected interval (e.g., doing jitter analysis). Processes which are +// recording ad-hoc values (e.g., latency for incoming requests) can't take +// advantage of this. +// CH Compat +func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { + if err := h.RecordValue(float64(v)); err != nil { + return err + } + + if expectedInterval <= 0 || v <= expectedInterval { + return nil + } + + missingValue := v - expectedInterval + for missingValue >= expectedInterval { + if err := h.RecordValue(float64(missingValue)); err != nil { + return err + } + missingValue -= expectedInterval + } + + return nil +} + +// find where a new bin should go +func (h *Histogram) InternalFind(hb *Bin) (bool, int16) { + if h.used == 0 { + return false, 0 + } + rv := -1 + idx := int16(0) + l := int16(0) + r := h.used - 1 + for l < r { + check := (r + l) / 2 + rv = h.bvs[check].Compare(hb) + if rv == 0 { + l = check + r = check + } else if rv > 0 { + l = check + 1 + } else { + r = check - 1 + } + } + if rv != 0 { + rv = h.bvs[l].Compare(hb) + } + idx = l + if rv == 0 { + return true, idx + } + if rv < 0 { + return false, idx + } + idx++ + return false, idx +} + +func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 { + h.mutex.Lock() + defer h.mutex.Unlock() + if count == 0 { + return 0 + } + found, idx := h.InternalFind(hb) + if !found { + if h.used == h.allocd { + new_bvs := make([]Bin, h.allocd+DEFAULT_HIST_SIZE) + if idx > 0 { + copy(new_bvs[0:], h.bvs[0:idx]) + } + if idx < h.used { + copy(new_bvs[idx+1:], h.bvs[idx:]) + } + h.allocd = h.allocd + DEFAULT_HIST_SIZE + h.bvs = new_bvs + } else { + copy(h.bvs[idx+1:], h.bvs[idx:h.used]) + } + h.bvs[idx].val = hb.val + h.bvs[idx].exp = hb.exp + h.bvs[idx].count = uint64(count) + h.used++ + return h.bvs[idx].count + } + var newval uint64 + if count < 0 { + newval = h.bvs[idx].count - uint64(-count) + } else { + newval = h.bvs[idx].count + uint64(count) + } + if newval < h.bvs[idx].count { //rolled + newval = ^uint64(0) + } + h.bvs[idx].count = newval + return newval - h.bvs[idx].count +} + +// RecordValues records n occurrences of the given value, returning an error if +// the value is out of range. 
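+// In this implementation the returned error is always nil: SetFromFloat64
+// coerces values it cannot represent into a sentinel bin instead of
+// rejecting them.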
+func (h *Histogram) RecordValues(v float64, n int64) error { + var hb Bin + hb.SetFromFloat64(v) + h.InsertBin(&hb, n) + return nil +} + +// Approximate mean +func (h *Histogram) ApproxMean() float64 { + h.mutex.Lock() + defer h.mutex.Unlock() + divisor := 0.0 + sum := 0.0 + for i := int16(0); i < h.used; i++ { + midpoint := h.bvs[i].Midpoint() + cardinality := float64(h.bvs[i].count) + divisor += cardinality + sum += midpoint * cardinality + } + if divisor == 0.0 { + return math.NaN() + } + return sum / divisor +} + +// Approximate sum +func (h *Histogram) ApproxSum() float64 { + h.mutex.Lock() + defer h.mutex.Unlock() + sum := 0.0 + for i := int16(0); i < h.used; i++ { + midpoint := h.bvs[i].Midpoint() + cardinality := float64(h.bvs[i].count) + sum += midpoint * cardinality + } + return sum +} + +func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { + h.mutex.Lock() + defer h.mutex.Unlock() + q_out := make([]float64, len(q_in)) + i_q, i_b := 0, int16(0) + total_cnt, bin_width, bin_left, lower_cnt, upper_cnt := 0.0, 0.0, 0.0, 0.0, 0.0 + if len(q_in) == 0 { + return q_out, nil + } + // Make sure the requested quantiles are in order + for i_q = 1; i_q < len(q_in); i_q++ { + if q_in[i_q-1] > q_in[i_q] { + return nil, errors.New("out of order") + } + } + // Add up the bins + for i_b = 0; i_b < h.used; i_b++ { + if !h.bvs[i_b].IsNaN() { + total_cnt += float64(h.bvs[i_b].count) + } + } + if total_cnt == 0.0 { + return nil, errors.New("empty_histogram") + } + + for i_q = 0; i_q < len(q_in); i_q++ { + if q_in[i_q] < 0.0 || q_in[i_q] > 1.0 { + return nil, errors.New("out of bound quantile") + } + q_out[i_q] = total_cnt * q_in[i_q] + } + + for i_b = 0; i_b < h.used; i_b++ { + if h.bvs[i_b].IsNaN() { + continue + } + bin_width = h.bvs[i_b].BinWidth() + bin_left = h.bvs[i_b].Left() + lower_cnt = upper_cnt + upper_cnt = lower_cnt + float64(h.bvs[i_b].count) + break + } + for i_q = 0; i_q < len(q_in); i_q++ { + for i_b < (h.used-1) && upper_cnt < q_out[i_q] { + i_b++ + bin_width = h.bvs[i_b].BinWidth() + bin_left = h.bvs[i_b].Left() + lower_cnt = upper_cnt + upper_cnt = lower_cnt + float64(h.bvs[i_b].count) + } + if lower_cnt == q_out[i_q] { + q_out[i_q] = bin_left + } else if upper_cnt == q_out[i_q] { + q_out[i_q] = bin_left + bin_width + } else { + if bin_width == 0 { + q_out[i_q] = bin_left + } else { + q_out[i_q] = bin_left + (q_out[i_q]-lower_cnt)/(upper_cnt-lower_cnt)*bin_width + } + } + } + return q_out, nil +} + +// ValueAtQuantile returns the recorded value at the given quantile (0..1). +func (h *Histogram) ValueAtQuantile(q float64) float64 { + h.mutex.Lock() + defer h.mutex.Unlock() + q_in := make([]float64, 1) + q_in[0] = q + q_out, err := h.ApproxQuantile(q_in) + if err == nil && len(q_out) == 1 { + return q_out[0] + } + return math.NaN() +} + +// SignificantFigures returns the significant figures used to create the +// histogram +// CH Compat +func (h *Histogram) SignificantFigures() int64 { + return 2 +} + +// Equals returns true if the two Histograms are equivalent, false if not. 
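+// Equivalence means the same bins with the same counts; the allocated
+// capacity of the two histograms is not compared.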
+func (h *Histogram) Equals(other *Histogram) bool { + h.mutex.Lock() + other.mutex.Lock() + defer h.mutex.Unlock() + defer other.mutex.Unlock() + switch { + case + h.used != other.used: + return false + default: + for i := int16(0); i < h.used; i++ { + if h.bvs[i].Compare(&other.bvs[i]) != 0 { + return false + } + if h.bvs[i].count != other.bvs[i].count { + return false + } + } + } + return true +} + +func (h *Histogram) CopyAndReset() *Histogram { + h.mutex.Lock() + defer h.mutex.Unlock() + newhist := &Histogram{ + allocd: h.allocd, + used: h.used, + bvs: h.bvs, + } + h.allocd = DEFAULT_HIST_SIZE + h.bvs = make([]Bin, DEFAULT_HIST_SIZE) + h.used = 0 + return newhist +} +func (h *Histogram) DecStrings() []string { + h.mutex.Lock() + defer h.mutex.Unlock() + out := make([]string, h.used) + for i, bin := range h.bvs[0:h.used] { + var buffer bytes.Buffer + buffer.WriteString("H[") + buffer.WriteString(fmt.Sprintf("%3.1e", bin.Value())) + buffer.WriteString("]=") + buffer.WriteString(fmt.Sprintf("%v", bin.count)) + out[i] = buffer.String() + } + return out +} diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS b/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS new file mode 100644 index 000000000..b430b78ea --- /dev/null +++ b/vendor/github.com/cyberdelia/go-metrics-graphite/AUTHORS @@ -0,0 +1,8 @@ +These people have provided bug fixes, new features or improved the documentation. + +* Daniel Garcia +* Peter Teichman +* Phillip Kovalev +* Richard Crowley +* Timothée Peignier +* Tomás Senart diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE b/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE new file mode 100644 index 000000000..fdfc8abff --- /dev/null +++ b/vendor/github.com/cyberdelia/go-metrics-graphite/LICENSE @@ -0,0 +1,22 @@ +Copyright 2015 Timothée Peignier. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/README.md b/vendor/github.com/cyberdelia/go-metrics-graphite/README.md new file mode 100644 index 000000000..e43f4df82 --- /dev/null +++ b/vendor/github.com/cyberdelia/go-metrics-graphite/README.md @@ -0,0 +1,19 @@ +This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics) +library which will post the metrics to Graphite. 
It was originally part of the +`go-metrics` library itself, but has been split off to make maintenance of +both the core library and the client easier. + +### Usage + +```go +import "github.com/cyberdelia/go-metrics-graphite" + + +go graphite.Graphite(metrics.DefaultRegistry, + 1*time.Second, "some.prefix", addr) +``` + +### Migrating from `rcrowley/go-metrics` implementation + +Simply modify the import from `"github.com/rcrowley/go-metrics/librato"` to +`"github.com/cyberdelia/go-metrics-graphite"` and it should Just Work. diff --git a/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go b/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go new file mode 100644 index 000000000..ffc309b81 --- /dev/null +++ b/vendor/github.com/cyberdelia/go-metrics-graphite/graphite.go @@ -0,0 +1,113 @@ +package graphite + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" + + "github.com/rcrowley/go-metrics" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry metrics.Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r metrics.Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. 
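+//
+// A minimal caller-side sketch (not part of this package):
+//
+//	for _ = range time.Tick(c.FlushInterval) {
+//		if err := graphite.GraphiteOnce(c); err != nil {
+//			log.Println(err)
+//		}
+//	}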
+func GraphiteOnce(c GraphiteConfig) error { + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case metrics.Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case metrics.Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case metrics.GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case metrics.Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case metrics.Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case metrics.Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx]/du, now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/fatih/structs/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml new file mode 100644 index 000000000..845012b7a --- /dev/null +++ 
b/vendor/github.com/fatih/structs/.travis.yml @@ -0,0 +1,11 @@ +language: go +go: + - 1.6 + - tip +sudo: false +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: +- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE new file mode 100644 index 000000000..34504e4b3 --- /dev/null +++ b/vendor/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md new file mode 100644 index 000000000..44e01006e --- /dev/null +++ b/vendor/github.com/fatih/structs/README.md @@ -0,0 +1,163 @@ +# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) + +Structs contains various utilities to work with Go (Golang) structs. It was +initially used by me to convert a struct into a `map[string]interface{}`. With +time I've added other utilities for structs. It's basically a high level +package based on primitives from the reflect package. Feel free to add new +functions or improve the existing code. + +## Install + +```bash +go get github.com/fatih/structs +``` + +## Usage and Examples + +Just like the standard lib `strings`, `bytes` and co packages, `structs` has +many global functions to manipulate or organize your struct data. 
Lets define +and declare a struct: + +```go +type Server struct { + Name string `json:"name,omitempty"` + ID int + Enabled bool + users []string // not exported + http.Server // embedded +} + +server := &Server{ + Name: "gopher", + ID: 123456, + Enabled: true, +} +``` + +```go +// Convert a struct to a map[string]interface{} +// => {"Name":"gopher", "ID":123456, "Enabled":true} +m := structs.Map(server) + +// Convert the values of a struct to a []interface{} +// => ["gopher", 123456, true] +v := structs.Values(server) + +// Convert the names of a struct to a []string +// (see "Names methods" for more info about fields) +n := structs.Names(server) + +// Convert the values of a struct to a []*Field +// (see "Field methods" for more info about fields) +f := structs.Fields(server) + +// Return the struct name => "Server" +n := structs.Name(server) + +// Check if any field of a struct is initialized or not. +h := structs.HasZero(server) + +// Check if all fields of a struct is initialized or not. +z := structs.IsZero(server) + +// Check if server is a struct or a pointer to struct +i := structs.IsStruct(server) +``` + +### Struct methods + +The structs functions can be also used as independent methods by creating a new +`*structs.Struct`. This is handy if you want to have more control over the +structs (such as retrieving a single Field). + +```go +// Create a new struct type: +s := structs.New(server) + +m := s.Map() // Get a map[string]interface{} +v := s.Values() // Get a []interface{} +f := s.Fields() // Get a []*Field +n := s.Names() // Get a []string +f := s.Field(name) // Get a *Field based on the given field name +f, ok := s.FieldOk(name) // Get a *Field based on the given field name +n := s.Name() // Get the struct name +h := s.HasZero() // Check if any field is initialized +z := s.IsZero() // Check if all fields are initialized +``` + +### Field methods + +We can easily examine a single Field for more detail. Below you can see how we +get and interact with various field methods: + + +```go +s := structs.New(server) + +// Get the Field struct for the "Name" field +name := s.Field("Name") + +// Get the underlying value, value => "gopher" +value := name.Value().(string) + +// Set the field's value +name.Set("another gopher") + +// Get the field's kind, kind => "string" +name.Kind() + +// Check if the field is exported or not +if name.IsExported() { + fmt.Println("Name field is exported") +} + +// Check if the value is a zero value, such as "" for string, 0 for int +if !name.IsZero() { + fmt.Println("Name is initialized") +} + +// Check if the field is an anonymous (embedded) field +if !name.IsEmbedded() { + fmt.Println("Name is not an embedded field") +} + +// Get the Field's tag value for tag name "json", tag value => "name,omitempty" +tagValue := name.Tag("json") +``` + +Nested structs are supported too: + +```go +addrField := s.Field("Server").Field("Addr") + +// Get the value for addr +a := addrField.Value().(string) + +// Or get all fields +httpServer := s.Field("Server").Fields() +``` + +We can also get a slice of Fields from the Struct type to iterate over all +fields. 
This is handy if you wish to examine all fields: + +```go +s := structs.New(server) + +for _, f := range s.Fields() { + fmt.Printf("field name: %+v\n", f.Name()) + + if f.IsExported() { + fmt.Printf("value : %+v\n", f.Value()) + fmt.Printf("is zero : %+v\n", f.IsZero()) + } +} +``` + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * [Cihangir Savas](https://github.com/cihangir) + +## License + +The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go new file mode 100644 index 000000000..e69783230 --- /dev/null +++ b/vendor/github.com/fatih/structs/field.go @@ -0,0 +1,141 @@ +package structs + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errNotExported = errors.New("field is not exported") + errNotSettable = errors.New("field is not settable") +) + +// Field represents a single struct field that encapsulates high level +// functions around the field. +type Field struct { + value reflect.Value + field reflect.StructField + defaultTag string +} + +// Tag returns the value associated with key in the tag string. If there is no +// such key in the tag, Tag returns the empty string. +func (f *Field) Tag(key string) string { + return f.field.Tag.Get(key) +} + +// Value returns the underlying value of the field. It panics if the field +// is not exported. +func (f *Field) Value() interface{} { + return f.value.Interface() +} + +// IsEmbedded returns true if the given field is an anonymous field (embedded) +func (f *Field) IsEmbedded() bool { + return f.field.Anonymous +} + +// IsExported returns true if the given field is exported. +func (f *Field) IsExported() bool { + return f.field.PkgPath == "" +} + +// IsZero returns true if the given field is not initialized (has a zero value). +// It panics if the field is not exported. +func (f *Field) IsZero() bool { + zero := reflect.Zero(f.value.Type()).Interface() + current := f.Value() + + return reflect.DeepEqual(current, zero) +} + +// Name returns the name of the given field +func (f *Field) Name() string { + return f.field.Name +} + +// Kind returns the fields kind, such as "string", "map", "bool", etc .. +func (f *Field) Kind() reflect.Kind { + return f.value.Kind() +} + +// Set sets the field to given value v. It returns an error if the field is not +// settable (not addressable or not exported) or if the given value's type +// doesn't match the fields type. +func (f *Field) Set(val interface{}) error { + // we can't set unexported fields, so be sure this field is exported + if !f.IsExported() { + return errNotExported + } + + // do we get here? not sure... + if !f.value.CanSet() { + return errNotSettable + } + + given := reflect.ValueOf(val) + + if f.value.Kind() != given.Kind() { + return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind()) + } + + f.value.Set(given) + return nil +} + +// Zero sets the field to its zero value. It returns an error if the field is not +// settable (not addressable or not exported). +func (f *Field) Zero() error { + zero := reflect.Zero(f.value.Type()).Interface() + return f.Set(zero) +} + +// Fields returns a slice of Fields. This is particular handy to get the fields +// of a nested struct . A struct tag with the content of "-" ignores the +// checking of that particular field. Example: +// +// // Field is ignored by this package. 
+// Field *http.Request `structs:"-"` +// +// It panics if field is not exported or if field's kind is not struct +func (f *Field) Fields() []*Field { + return getFields(f.value, f.defaultTag) +} + +// Field returns the field from a nested struct. It panics if the nested struct +// is not exported or if the field was not found. +func (f *Field) Field(name string) *Field { + field, ok := f.FieldOk(name) + if !ok { + panic("field not found") + } + + return field +} + +// FieldOk returns the field from a nested struct. The boolean returns whether +// the field was found (true) or not (false). +func (f *Field) FieldOk(name string) (*Field, bool) { + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) + t := v.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: v.FieldByName(name), + }, true +} diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go new file mode 100644 index 000000000..39eb083d3 --- /dev/null +++ b/vendor/github.com/fatih/structs/structs.go @@ -0,0 +1,507 @@ +// Package structs contains various utilities functions to work with structs. +package structs + +import ( + "fmt" + + "reflect" +) + +var ( + // DefaultTagName is the default tag name for struct fields which provides + // a more granular to tweak certain structs. Lookup the necessary functions + // for more info. + DefaultTagName = "structs" // struct's field default tag name +) + +// Struct encapsulates a struct type to provide several high level functions +// around the struct. +type Struct struct { + raw interface{} + value reflect.Value + TagName string +} + +// New returns a new *Struct with the struct s. It panics if the s's kind is +// not struct. +func New(s interface{}) *Struct { + return &Struct{ + raw: s, + value: strctVal(s), + TagName: DefaultTagName, + } +} + +// Map converts the given struct to a map[string]interface{}, where the keys +// of the map are the field names and the values of the map the associated +// values of the fields. The default key string is the struct field name but +// can be changed in the struct field's tag value. The "structs" key in the +// struct's field tag value is the key name. Example: +// +// // Field appears in map as key "myName". +// Name string `structs:"myName"` +// +// A tag value with the content of "-" ignores that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A tag value with the content of "string" uses the stringer to get the value. Example: +// +// // The value will be output of Animal's String() func. +// // Map will panic if Animal does not implement String(). +// Field *Animal `structs:"field,string"` +// +// A tag value with the option of "flatten" used in a struct field is to flatten its fields +// in the output map. Example: +// +// // The FieldStruct's fields will be flattened into the output map. +// FieldStruct time.Time `structs:"flatten"` +// +// A tag value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. 
+// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field if +// the field value is empty. Example: +// +// // Field appears in map as key "myName", but the field is +// // skipped if empty. +// Field string `structs:"myName,omitempty"` +// +// // Field appears in map as key "Field" (the default), but +// // the field is skipped if empty. +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. +func (s *Struct) Map() map[string]interface{} { + out := make(map[string]interface{}) + s.FillMap(out) + return out +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func (s *Struct) FillMap(out map[string]interface{}) { + if out == nil { + return + } + + fields := s.structFields() + + for _, field := range fields { + name := field.Name + val := s.value.FieldByName(name) + isSubStruct := false + var finalVal interface{} + + tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) + if tagName != "" { + name = tagName + } + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // map[string]interface{} too + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + isSubStruct = true + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + } else { + finalVal = val.Interface() + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + out[name] = s.String() + } + continue + } + + if isSubStruct && (tagOpts.Has("flatten")) { + for k := range finalVal.(map[string]interface{}) { + out[k] = finalVal.(map[string]interface{})[k] + } + } else { + out[name] = finalVal + } + } +} + +// Values converts the given s struct's field values to a []interface{}. A +// struct tag with the content of "-" ignores the that particular field. +// Example: +// +// // Field is ignored by this package. +// Field int `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Fields is not processed further by this package. +// Field time.Time `structs:",omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field and +// is not added to the values if the field value is empty. Example: +// +// // Field is skipped if empty +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
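+// Values are appended in the struct's field declaration order, and the
+// values of nested structs (unless tagged with "omitnested") are flattened
+// into the slice in place.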
+func (s *Struct) Values() []interface{} { + fields := s.structFields() + + var t []interface{} + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + t = append(t, s.String()) + } + continue + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // []interface{} to be added to the final values slice + for _, embeddedVal := range Values(val.Interface()) { + t = append(t, embeddedVal) + } + } else { + t = append(t, val.Interface()) + } + } + + return t +} + +// Fields returns a slice of Fields. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Fields() []*Field { + return getFields(s.value, s.TagName) +} + +// Names returns a slice of field names. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Names() []string { + fields := getFields(s.value, s.TagName) + + names := make([]string, len(fields)) + + for i, field := range fields { + names[i] = field.Name() + } + + return names +} + +func getFields(v reflect.Value, tagName string) []*Field { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + t := v.Type() + + var fields []*Field + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if tag := field.Tag.Get(tagName); tag == "-" { + continue + } + + f := &Field{ + field: field, + value: v.FieldByName(field.Name), + } + + fields = append(fields, f) + + } + + return fields +} + +// Field returns a new Field struct that provides several high level functions +// around a single struct field entity. It panics if the field is not found. +func (s *Struct) Field(name string) *Field { + f, ok := s.FieldOk(name) + if !ok { + panic("field not found") + } + + return f +} + +// FieldOk returns a new Field struct that provides several high level functions +// around a single struct field entity. The boolean returns true if the field +// was found. +func (s *Struct) FieldOk(name string) (*Field, bool) { + t := s.value.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: s.value.FieldByName(name), + defaultTag: s.TagName, + }, true +} + +// IsZero returns true if all fields in a struct is a zero value (not +// initialized) A struct tag with the content of "-" ignores the checking of +// that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
It panics if s's kind is not struct. +func (s *Struct) IsZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := IsZero(val.Interface()) + if !ok { + return false + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if !reflect.DeepEqual(current, zero) { + return false + } + } + + return true +} + +// HasZero returns true if a field in a struct is not initialized (zero value). +// A struct tag with the content of "-" ignores the checking of that particular +// field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. +func (s *Struct) HasZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := HasZero(val.Interface()) + if ok { + return true + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + return true + } + } + + return false +} + +// Name returns the structs's type name within its package. For more info refer +// to Name() function. +func (s *Struct) Name() string { + return s.value.Type().Name() +} + +// structFields returns the exported struct fields for a given s struct. This +// is a convenient helper method to avoid duplicate code in some of the +// functions. +func (s *Struct) structFields() []reflect.StructField { + t := s.value.Type() + + var f []reflect.StructField + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // we can't access the value of unexported fields + if field.PkgPath != "" { + continue + } + + // don't check if it's omitted + if tag := field.Tag.Get(s.TagName); tag == "-" { + continue + } + + f = append(f, field) + } + + return f +} + +func strctVal(s interface{}) reflect.Value { + v := reflect.ValueOf(s) + + // if pointer get the underlying element≤ + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + panic("not struct") + } + + return v +} + +// Map converts the given struct to a map[string]interface{}. For more info +// refer to Struct types Map() method. It panics if s's kind is not struct. +func Map(s interface{}) map[string]interface{} { + return New(s).Map() +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func FillMap(s interface{}, out map[string]interface{}) { + New(s).FillMap(out) +} + +// Values converts the given struct to a []interface{}. For more info refer to +// Struct types Values() method. It panics if s's kind is not struct. 
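+// Like the other package-level helpers, it builds a fresh *Struct via New on
+// every call, so repeated conversions of the same value redo the reflection
+// work; keep a *Struct around if that matters.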
+func Values(s interface{}) []interface{} { + return New(s).Values() +} + +// Fields returns a slice of *Field. For more info refer to Struct types +// Fields() method. It panics if s's kind is not struct. +func Fields(s interface{}) []*Field { + return New(s).Fields() +} + +// Names returns a slice of field names. For more info refer to Struct types +// Names() method. It panics if s's kind is not struct. +func Names(s interface{}) []string { + return New(s).Names() +} + +// IsZero returns true if all fields is equal to a zero value. For more info +// refer to Struct types IsZero() method. It panics if s's kind is not struct. +func IsZero(s interface{}) bool { + return New(s).IsZero() +} + +// HasZero returns true if any field is equal to a zero value. For more info +// refer to Struct types HasZero() method. It panics if s's kind is not struct. +func HasZero(s interface{}) bool { + return New(s).HasZero() +} + +// IsStruct returns true if the given variable is a struct or a pointer to +// struct. +func IsStruct(s interface{}) bool { + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // uninitialized zero value of a struct + if v.Kind() == reflect.Invalid { + return false + } + + return v.Kind() == reflect.Struct +} + +// Name returns the structs's type name within its package. It returns an +// empty string for unnamed types. It panics if s's kind is not struct. +func Name(s interface{}) string { + return New(s).Name() +} diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go new file mode 100644 index 000000000..8859341c1 --- /dev/null +++ b/vendor/github.com/fatih/structs/tags.go @@ -0,0 +1,32 @@ +package structs + +import "strings" + +// tagOptions contains a slice of tag options +type tagOptions []string + +// Has returns true if the given optiton is available in tagOptions +func (t tagOptions) Has(opt string) bool { + for _, tagOpt := range t { + if tagOpt == opt { + return true + } + } + + return false +} + +// parseTag splits a struct field's tag into its name and a list of options +// which comes after a name. A tag is in the form of: "name,option1,option2". +// The name can be neglectected. +func parseTag(tag string) (string, tagOptions) { + // tag is one of followings: + // "" + // "name" + // "name,opt" + // "name,opt,opt2" + // ",opt" + + res := strings.Split(tag, ",") + return res[0], res[1:] +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 000000000..b4ae623be --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 000000000..95e8fd2fd --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,11 @@ +sudo: false + +language: go + +go: + - 1.7 + - 1.8 + - 1.9 + +script: + - go test -v ./... 
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 000000000..9d4735cad --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 000000000..804cf22e6 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! /bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... 
-run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 000000000..02e7de80a --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m != 
nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+ } + + if len(next) == len(matchers) { + return next + } + + return minimizeMatchers(next) +} + +// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree +func minimizeTree(tree *ast.Node) *ast.Node { + switch tree.Kind { + case ast.KindAnyOf: + return minimizeTreeAnyOf(tree) + default: + return nil + } +} + +// minimizeAnyOf tries to find common children of given node of AnyOf pattern +// it searches for common children from left and from right +// if any common children are found – then it returns new optimized ast tree +// else it returns nil +func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { + if !areOfSameKind(tree.Children, ast.KindPattern) { + return nil + } + + commonLeft, commonRight := commonChildren(tree.Children) + commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) + if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts + return nil + } + + var result []*ast.Node + if commonLeftCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) + } + + var anyOf []*ast.Node + for _, child := range tree.Children { + reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] + var node *ast.Node + if len(reuse) == 0 { + // this pattern is completely reduced by commonLeft and commonRight patterns + // so it become nothing + node = ast.NewNode(ast.KindNothing, nil) + } else { + node = ast.NewNode(ast.KindPattern, nil, reuse...) + } + anyOf = appendIfUnique(anyOf, node) + } + switch { + case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: + result = append(result, anyOf[0]) + case len(anyOf) > 1: + result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) + } + + if commonRightCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) + } + + return ast.NewNode(ast.KindPattern, nil, result...) 
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 000000000..2afde343a --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string { + b := make([]byte, 2*len(s)) + + // a byte loop is correct because all meta characters are ASCII + j := 0 + for i := 0; i < len(s); i++ { + if syntax.Special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + + return string(b[0:j]) +} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go new file mode 100644 index 000000000..514a9a5c4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/strings" +) + +type Any struct { + Separators []rune +} + +func NewAny(s []rune) Any { + return Any{s} +} + +func (self Any) Match(s string) bool { + return strings.IndexAnyRunes(s, self.Separators) == -1 +} + +func (self Any) Index(s string) (int, []int) { + found := strings.IndexAnyRunes(s, self.Separators) + switch found { + case -1: + case 0: + return 0, segments0 + default: + s = s[:found] + } + + segments := acquireSegments(len(s)) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Any) Len() int { + return lenNo +} + +func (self Any) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go new file mode 100644 index 000000000..8e65356cd --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any_of.go @@ -0,0 +1,82 @@ +package match + +import "fmt" + +type AnyOf struct { + Matchers Matchers +} + +func NewAnyOf(m ...Matcher) AnyOf { + return AnyOf{Matchers(m)} +} + +func (self *AnyOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self AnyOf) Match(s string) bool { + for _, m := range self.Matchers { + if m.Match(s) { + return true + } + } + + return false +} + +func (self AnyOf) Index(s string) (int, []int) { + index := -1 + + segments := acquireSegments(len(s)) + for _, m := range self.Matchers { + idx, seg := m.Index(s) + if idx == -1 { + continue + } + + if index == -1 || idx < index { + index = idx + segments = append(segments[:0], seg...) 
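+			// segments now holds its own copy of seg; seg may point into a
+			// sub-matcher's pooled buffer, so it is not aliased here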
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 000000000..8302bf824 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,185 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (index int, segments []int) { + //inputLen := len(s) + //// try to cut unnecessary parts + //// by knowledge of length of right and left part + //offset, limit := self.offsetLimit(inputLen) + //for offset < limit { + // // search for matching part in substring + // vi, segments := self.Value.Index(s[offset:limit]) + // if index == -1 { + // return -1, nil + // } + // if self.Left == nil { + // if index != offset { + // return -1, nil + // } + // } else { + // left := s[:offset+vi] + // i := self.Left.IndexSuffix(left) + // if i == -1 { + // return -1, nil + // } + // index = i + // } + // if self.Right != nil { + // for _, seg := range segments { + // right := s[:offset+vi+seg] + // } + // } + + // l := s[:offset+index] + // var left bool + // if self.Left != nil { + // left = self.Left.Index(l) + // } else { + // left = l == "" + // } + //} + + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + // try to cut unnecessary parts + // by knowledge of length of right and left part + offset, limit := self.offsetLimit(inputLen) + + for offset < limit { + // search for matching part in substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := 
utf8.DecodeRuneInString(s[offset+index:])
+		offset += index + step
+
+		releaseSegments(segments)
+	}
+
+	return false
+}
+
+func (self BTree) offsetLimit(inputLen int) (offset int, limit int) {
+	// self.LengthRunes, self.LeftLengthRunes and self.RightLengthRunes hold the rune length of each part.
+	// Byte lengths are manipulated here for better optimization, but the checks still hold,
+	// because the minimum byte length of a 1-rune string is 1.
+	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
+		return 0, 0
+	}
+	if self.LeftLengthRunes >= 0 {
+		offset = self.LeftLengthRunes
+	}
+	if self.RightLengthRunes >= 0 {
+		limit = inputLen - self.RightLengthRunes
+	} else {
+		limit = inputLen
+	}
+	return offset, limit
+}
+
+func (self BTree) String() string {
+	const n string = "<nil>"
+	var l, r string
+	if self.Left == nil {
+		l = n
+	} else {
+		l = self.Left.String()
+	}
+	if self.Right == nil {
+		r = n
+	} else {
+		r = self.Right.String()
+	}
+
+	return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
+}
diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go
new file mode 100644
index 000000000..0998e95b0
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/contains.go
@@ -0,0 +1,58 @@
+package match
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Contains struct {
+	Needle string
+	Not    bool
+}
+
+func NewContains(needle string, not bool) Contains {
+	return Contains{needle, not}
+}
+
+func (self Contains) Match(s string) bool {
+	return strings.Contains(s, self.Needle) != self.Not
+}
+
+func (self Contains) Index(s string) (int, []int) {
+	var offset int
+
+	idx := strings.Index(s, self.Needle)
+
+	if !self.Not {
+		if idx == -1 {
+			return -1, nil
+		}
+
+		offset = idx + len(self.Needle)
+		if len(s) <= offset {
+			return 0, []int{offset}
+		}
+		s = s[offset:]
+	} else if idx != -1 {
+		s = s[:idx]
+	}
+
+	segments := acquireSegments(len(s) + 1)
+	for i := range s {
+		segments = append(segments, offset+i)
+	}
+
+	return 0, append(segments, offset+len(s))
+}
+
+func (self Contains) Len() int {
+	return lenNo
+}
+
+func (self Contains) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+	return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle)
+}
diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go
new file mode 100644
index 000000000..7c968ee36
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/every_of.go
@@ -0,0 +1,99 @@
+package match
+
+import (
+	"fmt"
+)
+
+type EveryOf struct {
+	Matchers Matchers
+}
+
+func NewEveryOf(m ...Matcher) EveryOf {
+	return EveryOf{Matchers(m)}
+}
+
+func (self *EveryOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+func (self EveryOf) Len() (l int) {
+	for _, m := range self.Matchers {
+		if ml := m.Len(); l > 0 {
+			l += ml
+		} else {
+			return -1
+		}
+	}
+
+	return
+}
+
+func (self EveryOf) Index(s string) (int, []int) {
+	var index int
+	var offset int
+
+	// allocate with cap len(s), the maximum possible number of segment values
+	next := acquireSegments(len(s))
+	current := acquireSegments(len(s))
+
+	sub := s
+	for i, m := range self.Matchers {
+		idx, seg := m.Index(sub)
+		if idx == -1 {
+			releaseSegments(next)
+			releaseSegments(current)
+			return -1, nil
+		}
+
+		if i == 0 {
+			// copy seg instead of assigning it: seg is a slice from a
+			// reusable buffer and could be overwritten in the next iteration
+			current = append(current, seg...)
+ } else { + // clear the next + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 000000000..7fd763ecd --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!" + } + + return fmt.Sprintf("", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 000000000..f80e007fb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) 
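+	// target now holds the merged result in its own (possibly pooled) backing array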
+
+	return target
+}
+
+func reverseSegments(input []int) {
+	l := len(input)
+	m := l / 2
+
+	for i := 0; i < m; i++ {
+		input[i], input[l-i-1] = input[l-i-1], input[i]
+	}
+}
diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go
new file mode 100644
index 000000000..d72f69eff
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/max.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+	"fmt"
+	"unicode/utf8"
+)
+
+type Max struct {
+	Limit int
+}
+
+func NewMax(l int) Max {
+	return Max{l}
+}
+
+func (self Max) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l > self.Limit {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (self Max) Index(s string) (int, []int) {
+	segments := acquireSegments(self.Limit + 1)
+	segments = append(segments, 0)
+	var count int
+	for i, r := range s {
+		count++
+		if count > self.Limit {
+			break
+		}
+		segments = append(segments, i+utf8.RuneLen(r))
+	}
+
+	return 0, segments
+}
+
+func (self Max) Len() int {
+	return lenNo
+}
+
+func (self Max) String() string {
+	return fmt.Sprintf("<max:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go
new file mode 100644
index 000000000..db57ac8eb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/min.go
@@ -0,0 +1,57 @@
+package match
+
+import (
+	"fmt"
+	"unicode/utf8"
+)
+
+type Min struct {
+	Limit int
+}
+
+func NewMin(l int) Min {
+	return Min{l}
+}
+
+func (self Min) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l >= self.Limit {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (self Min) Index(s string) (int, []int) {
+	var count int
+
+	c := len(s) - self.Limit + 1
+	if c <= 0 {
+		return -1, nil
+	}
+
+	segments := acquireSegments(c)
+	for i, r := range s {
+		count++
+		if count >= self.Limit {
+			segments = append(segments, i+utf8.RuneLen(r))
+		}
+	}
+
+	if len(segments) == 0 {
+		return -1, nil
+	}
+
+	return 0, segments
+}
+
+func (self Min) Len() int {
+	return lenNo
+}
+
+func (self Min) String() string {
+	return fmt.Sprintf("<min:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go
new file mode 100644
index 000000000..0d4ecd36b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/nothing.go
@@ -0,0 +1,27 @@
+package match
+
+import (
+	"fmt"
+)
+
+type Nothing struct{}
+
+func NewNothing() Nothing {
+	return Nothing{}
+}
+
+func (self Nothing) Match(s string) bool {
+	return len(s) == 0
+}
+
+func (self Nothing) Index(s string) (int, []int) {
+	return 0, segments0
+}
+
+func (self Nothing) Len() int {
+	return lenZero
+}
+
+func (self Nothing) String() string {
+	return fmt.Sprintf("<nothing>")
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go
new file mode 100644
index 000000000..a7347250e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix.go
@@ -0,0 +1,50 @@
+package match
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+type Prefix struct {
+	Prefix string
+}
+
+func NewPrefix(p string) Prefix {
+	return Prefix{p}
+}
+
+func (self Prefix) Index(s string) (int, []int) {
+	idx := strings.Index(s, self.Prefix)
+	if idx == -1 {
+		return -1, nil
+	}
+
+	length := len(self.Prefix)
+	var sub string
+	if len(s) > idx+length {
+		sub = s[idx+length:]
+	} else {
+		sub = ""
+	}
+
+	segments := acquireSegments(len(sub) + 1)
+	segments = append(segments, length)
+	for i, r := range sub {
+		segments = append(segments,
length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 000000000..8ee58fe1b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 000000000..8208085a1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if prefixIdx == -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 000000000..ce30245a4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := 
utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!" + } + return fmt.Sprintf("", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 000000000..4379042e4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 000000000..9ea6f3094 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // 
is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 000000000..ee6e3954c --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 000000000..85bea8c68 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 000000000..c5106f819 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 000000000..3875950bb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for 
i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 000000000..0a17616d3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// raw represents raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 000000000..f58144e73 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.com") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.com")) + g.Match("*.github.com") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + 
g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is created for compile-once patterns. This means, that compilation could take time, but +strings matching is done faster, than in case when always parsing template. + +If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower. + +Run `go test -bench=.` from source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same things with `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` is aka super-asterisk, that do not sensitive for separators. 
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 000000000..3220a694a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 000000000..429b40943 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return parserRange, tree, 
nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 000000000..a1c8d1962 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
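+	// '-' separates the lo and hi bounds inside a range such as [a-z]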
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 000000000..2797c4e83 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 000000000..1d168b148 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 000000000..a72355641 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; i-- { + 
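+		// compare the needle right-to-left against the window of s ending at index i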
for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 000000000..e8ee1920b --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 000000000..7e64988f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,43 @@
+Consul API client
+=================
+
+This package provides the `api` package, which gives programmatic
+access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client
+client, err := api.NewClient(api.DefaultConfig())
+if err != nil {
+    panic(err)
+}
+
+// Get a handle to the KV API
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err = kv.Put(p, nil)
+if err != nil {
+    panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+    panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 000000000..c3fb0d53a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,140 @@
+package api
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q 
*WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 000000000..1893d1cf3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,471 @@ +package api + +import ( + "bufio" + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + 
EnableTagOverride bool `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + Script string `json:",omitempty"` + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. 
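+//
+// A minimal usage sketch (the client value below is hypothetical, not part
+// of this file):
+//
+//	if err := client.Agent().Reload(); err != nil {
+//		// handle error
+//	}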
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). 
+// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). + Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). 
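+//
+// A minimal usage sketch (the client and check ID below are hypothetical);
+// the short status "pass" is translated to HealthPassing before the request:
+//
+//	err := client.Agent().UpdateTTL("service:web", "HTTP 200 OK", "pass")
+//	if err != nil {
+//		// handle error
+//	}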
+func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
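+//
+// A minimal usage sketch (the client value below is hypothetical):
+//
+//	if err := client.Agent().DisableNodeMaintenance(); err != nil {
+//		// handle error
+//	}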
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream +func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + logCh <- scanner.Text() + } + } + }() + + return logCh, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 000000000..f6fe5a1bd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,649 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. 
Currently, only one key/value pair can
+	// be provided for filtering.
+	NodeMeta map[string]string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex. This can be used as a WaitIndex to perform
+	// a blocking query
+	LastIndex uint64
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+
+	// Is address translation enabled for HTTP responses on this agent
+	AddressTranslationEnabled bool
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate the HTTP client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
+// Consul using TLS.
+type TLSConfig struct {
+	// Address is the optional address of the Consul server. The port, if any,
+	// will be removed from here and this will be set to the ServerName of the
+	// resulting config.
+	Address string
+
+	// CAFile is the optional path to the CA certificate used for Consul
+	// communication, defaults to the system bundle if not specified.
+	CAFile string
+
+	// CertFile is the optional path to the certificate for Consul
+	// communication. If this is set then you need to also set KeyFile.
+	CertFile string
+
+	// KeyFile is the optional path to the private key for Consul communication.
+	// If this is set then you need to also set CertFile.
+	KeyFile string
+
+	// InsecureSkipVerify if set to true will disable TLS host verification.
+	InsecureSkipVerify bool
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to Consul. 
If you have a long-lived
+// client object, this is the desired behavior and should make the most efficient
+// use of the connections to Consul. If you don't reuse a client object, which
+// is not recommended, then you may notice idle connections building up over
+// time. To avoid this, use DefaultNonPooledConfig() instead.
+func DefaultConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultNonPooledConfig returns a default configuration for the client which
+// does not pool connections. This isn't a recommended configuration because it
+// will reconnect to Consul on every request, but this is useful to avoid the
+// accumulation of idle connections if you make many client objects during the
+// lifetime of your application.
+func DefaultNonPooledConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultTransport)
+}
+
+// defaultConfig returns the default configuration for the client, using the
+// given function to make the transport.
+func defaultConfig(transportFn func() *http.Transport) *Config {
+	config := &Config{
+		Address: "127.0.0.1:8500",
+		Scheme:  "http",
+		HttpClient: &http.Client{
+			Transport: transportFn(),
+		},
+	}
+
+	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+		config.Address = addr
+	}
+
+	if token := os.Getenv(HTTPTokenEnvName); token != "" {
+		config.Token = token
+	}
+
+	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+		var username, password string
+		if strings.Contains(auth, ":") {
+			split := strings.SplitN(auth, ":", 2)
+			username = split[0]
+			password = split[1]
+		} else {
+			username = auth
+		}
+
+		config.HttpAuth = &HttpBasicAuth{
+			Username: username,
+			Password: password,
+		}
+	}
+
+	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+		enabled, err := strconv.ParseBool(ssl)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
+		}
+
+		if enabled {
+			config.Scheme = "https"
+		}
+	}
+
+	if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" {
+		doVerify, err := strconv.ParseBool(verify)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
+		}
+
+		if !doVerify {
+			tlsClientConfig, err := SetupTLSConfig(&TLSConfig{
+				InsecureSkipVerify: true,
+			})
+
+			// We don't expect this to fail given that we aren't
+			// parsing any of the input, but we panic just in case
+			// since this doesn't have an error return.
+			if err != nil {
+				panic(err)
+			}
+
+			transport := transportFn()
+			transport.TLSClientConfig = tlsClientConfig
+			config.HttpClient.Transport = transport
+		}
+	}
+
+	return config
+}
+
+// SetupTLSConfig is used to generate a TLSClientConfig that's useful for
+// talking to Consul using TLS. 
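+//
+// A minimal usage sketch (the address and CA path below are hypothetical,
+// not part of this file):
+//
+//	tlsClientCfg, err := SetupTLSConfig(&TLSConfig{
+//		Address: "consul.example.com:8501",
+//		CAFile:  "/etc/consul/ca.pem",
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	transport := cleanhttp.DefaultPooledTransport()
+//	transport.TLSClientConfig = tlsClientCfg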
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" { + data, err := ioutil.ReadFile(tlsConfig.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to read CA file: %v", err) + } + + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("failed to parse CA certificate") + } + tlsClientConfig.RootCAs = caPool + } + + return tlsClientConfig, nil +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. 
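+//
+// For example, durToMsec(250*time.Microsecond) yields "1ms" rather than
+// "0ms", while durToMsec(10*time.Second) yields "10000ms".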
+func durToMsec(dur time.Duration) string {
+	ms := dur / time.Millisecond
+	if dur > 0 && ms == 0 {
+		ms = 1
+	}
+	return fmt.Sprintf("%dms", ms)
+}
+
+// serverError is a string we look for to detect 500 errors.
+const serverError = "Unexpected response code: 500"
+
+// IsServerError returns true for 500 errors from the Consul servers; these
+// are usually retryable at a later time.
+func IsServerError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// TODO (slackpad) - Make a real error type here instead of using
+	// a string check.
+	return strings.Contains(err.Error(), serverError)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		if b, err := encodeBody(r.obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+	req.Header = r.header
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+
+	return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+		header: make(http.Header),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.header.Set("X-Consul-Token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Now().Sub(start)
+	return diff, resp, err
+}
+
+// query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	r := c.newRequest("GET", endpoint)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, out); err != nil {
+		return nil, err
+	}
+	return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions. 
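+//
+// A typical internal call looks like this (the endpoint and output struct
+// below are illustrative only):
+//
+//	var out struct{ ID string }
+//	wm, err := c.write("/v1/session/create", nil, &out, q)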
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 000000000..96226f11f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,194 @@ +package api + +type Node struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + Meta map[string]string +} + +type CatalogService struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int + ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string // Obsolete. 
+ Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := 
decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 000000000..ae8d16ee6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,67 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. +func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 000000000..85b5b069b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
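+//
+// A minimal usage sketch (the client, event name, and payload below are
+// hypothetical):
+//
+//	id, _, err := client.Event().Fire(&UserEvent{
+//		Name:    "deploy",
+//		Payload: []byte("v1.2.3"),
+//	}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = id // the ID of the newly fired event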
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 000000000..8abe2393a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,199 @@
+package api
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	// HealthAny is special, and is used as a wild card,
+	// not as a specific state.
+	HealthAny      = "any"
+	HealthPassing  = "passing"
+	HealthWarning  = "warning"
+	HealthCritical = "critical"
+	HealthMaint    = "maintenance"
+)
+
+const (
+	// NodeMaint is the special key set by a node in maintenance mode.
+	NodeMaint = "_node_maintenance"
+
+	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
+	ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+//  maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
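+//
+// For example (editorial sketch; assumes a configured api.Client named client):
+//
+//	checks, _, err := client.Health().State(api.HealthCritical, nil)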
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 000000000..44e06bbb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,419 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding to the creation of this KVPair. This
+	// is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KVOp constants give possible operations available in a KVTxn.
+type KVOp string
+
+const (
+	KVSet          KVOp = "set"
+	KVDelete       KVOp = "delete"
+	KVDeleteCAS    KVOp = "delete-cas"
+	KVDeleteTree   KVOp = "delete-tree"
+	KVCAS          KVOp = "cas"
+	KVLock         KVOp = "lock"
+	KVUnlock       KVOp = "unlock"
+	KVGet          KVOp = "get"
+	KVGetTree      KVOp = "get-tree"
+	KVCheckSession KVOp = "check-session"
+	KVCheckIndex   KVOp = "check-index"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+	Verb    KVOp
+	Key     string
+	Value   []byte
+	Flags   uint64
+	Index   uint64
+	Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+	Results []*KVPair
+	Errors  TxnErrors
+}
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to lookup a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
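+//
+// A minimal sketch (editorial addition; the key name and client are assumed):
+//
+//	pair, _, err := client.KV().Get("fabio/config", nil)
+//	if err == nil && pair != nil {
+//		value := pair.Value // raw bytes, already base64-decoded by the client
+//	}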
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
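+//
+// A rough sketch of the acquire/release pair (editorial addition; the session
+// ID and key name are illustrative assumptions):
+//
+//	p := &api.KVPair{Key: "service/leader", Session: sessionID}
+//	acquired, _, err := client.KV().Acquire(p, nil)
+//	// ... do work while holding the lock ...
+//	released, _, err := client.KV().Release(p, nil)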
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. 
Transactions are defined as a
+// list of operations to perform, using the KVOp constants and KVTxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store. Note that this hides the internal raw transaction interface
+// and munges the input and output types into KV-specific ones for ease of use.
+// If there are more non-KV operations in the future we may break out a new
+// transaction API client, but it will be easy to keep this KV-specific variant
+// supported.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	ops := KVTxnOps{
+//		&KVTxnOp{
+//			Verb:    KVLock,
+//			Key:     "test/lock",
+//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//			Value:   []byte("hello"),
+//		},
+//		&KVTxnOp{
+//			Verb: KVGet,
+//			Key:  "another/key",
+//		},
+//	}
+//	ok, response, _, err := kv.Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. Deleted keys will have a nil entry in the results, and to
+// save space, the Value of each key in the Results will be nil unless the
+// operation is a KVGet. If the transaction was rolled back, the Errors member
+// will have entries referencing the index of the operation that failed along
+// with an error message.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	// Convert into the internal format since this is an all-KV txn.
+	ops := make(TxnOps, 0, len(txn))
+	for _, kvOp := range txn {
+		ops = append(ops, &TxnOp{KV: kvOp})
+	}
+	r.obj = ops
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		// Convert from the internal format.
+		kvResp := KVTxnResponse{
+			Errors: txnResp.Errors,
+		}
+		for _, result := range txnResp.Results {
+			kvResp.Results = append(kvResp.Results, result.KV)
+		}
+		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 000000000..9f9845a43
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,384 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
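+//
+// For example (editorial sketch; the key name is an assumption):
+//
+//	opts := &api.LockOptions{
+//		Key:        "service/my-service/leader",
+//		SessionTTL: "15s",
+//	}
+//	lock, err := client.LockOpts(opts)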
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + if s, err := l.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
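+	// (Editorial note: with LockTryOnce set, the remaining wait budget is
+	// recomputed on each pass below, capping the total time spent waiting
+	// at roughly the configured LockWaitTime.)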
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Now().Sub(start)
+		if elapsed > qOpts.WaitTime {
+			return nil, nil
+		}
+
+		qOpts.WaitTime -= elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if pair != nil && pair.Session != "" {
+			// If the session is not null, this means that a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to cleanup the lock entry. It is not necessary
+// to invoke. It will fail if the lock is in use.
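+//
+// By way of contrast, a typical lock lifecycle looks like this (editorial
+// sketch; the key name is an assumption):
+//
+//	lock, _ := client.LockKey("service/my-service/leader")
+//	leaderCh, err := lock.Lock(nil)
+//	// ... act as leader while leaderCh stays open ...
+//	_ = lock.Unlock()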
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsServerError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 000000000..079e22486 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 000000000..7b0e461e9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,168 @@ +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. +package api + +import ( + "net" + "time" +) + +// Area defines a network area. 
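+//
+// For example (editorial sketch; the peer datacenter and server address are
+// illustrative assumptions):
+//
+//	area := &api.Area{
+//		PeerDatacenter: "dc2",
+//		RetryJoin:      []string{"10.0.0.10:8300"},
+//	}
+//	id, _, err := client.Operator().AreaCreate(area, nil)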
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IP addresses or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("POST", "/v1/operator/area")
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
+	var out []*Area
+	qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// AreaList returns all the available network areas.
+func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
+	var out []*Area
+	qm, err := op.c.query("/v1/operator/area", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// AreaDelete deletes the given network area.
+func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
+	r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+// AreaJoin attempts to join the given set of join addresses to the given
+// network area. See the Area structure for details about join addresses.
+func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
+	r.setWriteOptions(q)
+	r.obj = addresses
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out []*AreaJoinResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, wm, nil
+}
+
+// AreaMembers lists the Serf information about the members in the given area.
+func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
+	var out []*SerfMember
+	qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
new file mode 100644
index 000000000..59471ecf9
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
@@ -0,0 +1,215 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
+// Autopilot helps manage operator tasks related to Consul servers like removing
+// failed servers from the Raft quorum.
+type AutopilotConfiguration struct {
+	// CleanupDeadServers controls whether to remove dead servers from the Raft
+	// peer list when a new server joins
+	CleanupDeadServers bool
+
+	// LastContactThreshold is the limit on the amount of time a server can go
+	// without leader contact before being considered unhealthy.
+	LastContactThreshold *ReadableDuration
+
+	// MaxTrailingLogs is the number of entries in the Raft Log that a server can
+	// be behind before being considered unhealthy.
+	MaxTrailingLogs uint64
+
+	// ServerStabilizationTime is the minimum amount of time a server must be
+	// in a stable, healthy state before it can be added to the cluster. Only
+	// applicable with Raft protocol version 3 or higher.
+	ServerStabilizationTime *ReadableDuration
+
+	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+	// servers into zones for redundancy. If left blank, this feature will be disabled.
+	RedundancyZoneTag string
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration.
Resubmitting a configuration with + // AutopilotCASConfiguration will perform a check-and-set operation which ensures + // there hasn't been a subsequent update since the configuration was retrieved. + ModifyIndex uint64 +} + +// ServerHealth is the health (from the leader's point of view) of a server. +type ServerHealth struct { + // ID is the raft ID of the server. + ID string + + // Name is the node name of the server. + Name string + + // Address is the address of the server. + Address string + + // The status of the SerfHealth check for the server. + SerfStatus string + + // Version is the Consul version of the server. + Version string + + // Leader is whether this server is currently the leader. + Leader bool + + // LastContact is the time since this node's last contact with the leader. + LastContact *ReadableDuration + + // LastTerm is the highest leader term this server has a record of in its Raft log. + LastTerm uint64 + + // LastIndex is the last log index this server has a record of in its Raft log. + LastIndex uint64 + + // Healthy is whether or not the server is healthy according to the current + // Autopilot config. + Healthy bool + + // Voter is whether this is a voting server. + Voter bool + + // StableSince is the last time this server's Healthy value changed. + StableSince time.Time +} + +// OperatorHealthReply is a representation of the overall health of the cluster +type OperatorHealthReply struct { + // Healthy is true if all the servers in the cluster are healthy. + Healthy bool + + // FailureTolerance is the number of healthy servers that could be lost without + // an outage occurring. + FailureTolerance int + + // Servers holds the health of each server. + Servers []ServerHealth +} + +// ReadableDuration is a duration type that is serialized to JSON in human readable format. +type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. 
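+//
+// A common pattern is to read the current configuration and update it with a
+// check-and-set (editorial sketch; assumes a configured client):
+//
+//	conf, err := client.Operator().AutopilotGetConfiguration(nil)
+//	conf.CleanupDeadServers = true
+//	ok, err := client.Operator().AutopilotCASConfiguration(conf, nil)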
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
+// Autopilot configuration. The ModifyIndex value will be respected. Returns
+// true on success or false on failures.
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(string(buf.Bytes()), "true")
+
+	return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the
+// cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 000000000..4f91c3543
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,83 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 000000000..5f3c25b13
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,86 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", string(id))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 000000000..ff210de3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,198 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent which initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded)
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+
+	// NodeMeta is a map of required node metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	NodeMeta map[string]string
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+	Token string
+
+	// Service defines a service query (leaving things open for other types
+	// later).
+	Service ServiceQuery
+
+	// DNS has options that control how the results of this query are
+	// served over DNS.
+	DNS QueryDNSOptions
+
+	// Template is used to pass through the arguments for creating a
+	// prepared query with an attached template. If a template is given,
+	// interpolations are possible in other struct fields.
+	Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+	// Service is the service that was queried.
+	Service string
+
+	// Nodes has the nodes that were output by the query.
+	Nodes []ServiceEntry
+
+	// DNS has the options for serving these results over DNS.
+	DNS QueryDNSOptions
+
+	// Datacenter is the datacenter that these results came from.
+	Datacenter string
+
+	// Failovers is a count of how many times we had to query a remote
+	// datacenter.
+	Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
+type PreparedQuery struct {
+	c *Client
+}
+
+// PreparedQuery returns a handle to the prepared query endpoints.
+func (c *Client) PreparedQuery() *PreparedQuery {
+	return &PreparedQuery{c}
+}
+
+// Create makes a new prepared query. The ID of the new query is returned.
+func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/query")
+	r.setWriteOptions(q)
+	r.obj = query
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update makes updates to an existing prepared query.
+func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
+	return c.c.write("/v1/query/"+query.ID, query, nil, q)
+}
+
+// List is used to fetch all the prepared queries (always requires a management
+// token).
+func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Get is used to fetch a specific prepared query.
+func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query/"+queryID, &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Delete is used to delete a specific prepared query.
+func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+// Execute is used to execute a specific prepared query. You can execute using
+// a query ID or name.
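+//
+// For example (editorial sketch; the query name is an assumption):
+//
+//	res, _, err := client.PreparedQuery().Execute("my-query", nil)
+//	for _, entry := range res.Nodes {
+//		// entry.Node and entry.Service describe one healthy instance
+//	}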
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+	var out *PreparedQueryExecuteResponse
+	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 000000000..745a208c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 000000000..e6645ac1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,512 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix to
+	// use for coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true. It is used as a set effectively.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interrupted via the stopCh or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
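+//
+// A rough end-to-end sketch (editorial addition; the prefix and limit are
+// illustrative assumptions):
+//
+//	sema, _ := client.SemaphorePrefix("service/workers/sema", 3)
+//	slotCh, err := sema.Acquire(nil)
+//	// ... hold the slot while slotCh stays open ...
+//	_ = sema.Release()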
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + if sess, err := s.createSession(); err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } else { + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
+func (s *Semaphore) Release() error {
+	// Hold the lock as we try to release
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !s.isHeld {
+		return ErrSemaphoreNotHeld
+	}
+
+	// Set that we no longer own the lock
+	s.isHeld = false
+
+	// Stop the session renew
+	if s.sessionRenew != nil {
+		defer func() {
+			close(s.sessionRenew)
+			s.sessionRenew = nil
+		}()
+	}
+
+	// Get and clear the lock session
+	lockSession := s.lockSession
+	s.lockSession = ""
+
+	// Remove ourselves as a lock holder
+	kv := s.c.KV()
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+	pair, _, err := kv.Get(key, nil)
+	if err != nil {
+		return err
+	}
+	if pair == nil {
+		pair = &KVPair{}
+	}
+	lock, err := s.decodeLock(pair)
+	if err != nil {
+		return err
+	}
+
+	// Create a new lock without us as a holder
+	if _, ok := lock.Holders[lockSession]; ok {
+		delete(lock.Holders, lockSession)
+		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+		if err != nil {
+			return err
+		}
+
+		// Swap the locks
+		didSet, _, err := kv.CAS(newLock, nil)
+		if err != nil {
+			return fmt.Errorf("failed to update lock: %v", err)
+		}
+		if !didSet {
+			goto READ
+		}
+	}
+
+	// Destroy the contender entry
+	contenderKey := path.Join(s.opts.Prefix, lockSession)
+	if _, err := kv.Delete(contenderKey, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+	// Hold the lock as we try to destroy
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return ErrSemaphoreHeld
+	}
+
+	// List the prefix to find the semaphore
+	kv := s.c.KV()
+	pairs, _, err := kv.List(s.opts.Prefix, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Find the lock pair, bail if it doesn't exist
+	lockPair := s.findLock(pairs)
+	if lockPair.ModifyIndex == 0 {
+		return nil
+	}
+	if lockPair.Flags != SemaphoreFlagValue {
+		return ErrSemaphoreConflict
+	}
+
+	// Decode the lock
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return err
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if there are any holders
+	if len(lock.Holders) > 0 {
+		return ErrSemaphoreInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove semaphore: %v", err)
+	}
+	if !didRemove {
+		return ErrSemaphoreInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+	session := s.c.Session()
+	se := &SessionEntry{
+		Name:     s.opts.SessionName,
+		TTL:      s.opts.SessionTTL,
+		Behavior: SessionBehaviorDelete,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     path.Join(s.opts.Prefix, session),
+		Value:   s.opts.Value,
+		Session: session,
+		Flags:   SemaphoreFlagValue,
+	}
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+	for _, pair := range pairs {
+		if pair.Key == key {
+			return pair
+		}
+	}
+	return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+	// Handle if there is no lock
+	if pair == nil || pair.Value == nil {
+		return &semaphoreLock{
+			Limit:   s.opts.Limit,
+			Holders: make(map[string]bool),
+		}, nil
+	}
+
+	l := &semaphoreLock{}
+	if err := json.Unmarshal(pair.Value, l); err != nil {
+		return nil, fmt.Errorf("lock decoding failed: %v", err)
+	}
+	return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+	enc, err := json.Marshal(l)
+	if err != nil {
+		return nil, fmt.Errorf("lock encoding failed: %v", err)
+	}
+	pair := &KVPair{
+		Key:         path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+		Value:       enc,
+		Flags:       SemaphoreFlagValue,
+		ModifyIndex: oldIndex,
+	}
+	return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+	// Gather all the live holders
+	alive := make(map[string]struct{}, len(pairs))
+	for _, pair := range pairs {
+		if pair.Session != "" {
+			alive[pair.Session] = struct{}{}
+		}
+	}
+
+	// Remove any holders that are dead
+	for holder := range lock.Holders {
+		if _, ok := alive[holder]; !ok {
+			delete(lock.Holders, holder)
+		}
+	}
+}
+
+// monitorLock is a long-running routine to monitor semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := s.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := s.opts.MonitorRetries
+RETRY:
+	pairs, meta, err := kv.List(s.opts.Prefix, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+		if retries > 0 && IsServerError(err) {
+			time.Sleep(s.opts.MonitorRetryTime)
+			retries--
+			opts.WaitIndex = 0
+			goto RETRY
+		}
+		return
+	}
+	lockPair := s.findLock(pairs)
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return
+	}
+	s.pruneDeadHolders(lock, pairs)
+	if _, ok := lock.Holders[session]; ok {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
new file mode 100644
index 000000000..36e99a389
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/session.go
@@ -0,0 +1,217 @@
+package api
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// SessionBehaviorRelease is the default behavior and causes
+	// all associated locks to be released on session invalidation.
+	SessionBehaviorRelease = "release"
+
+	// SessionBehaviorDelete is new in Consul 0.5 and changes the
+	// behavior to delete all associated locks on session invalidation.
+	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
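+//
+// A minimal sketch of the intended pattern (client is assumed to be an
+// already-configured *Client; the TTL value is illustrative only):
+//
+//	se := &SessionEntry{TTL: "15s", Behavior: SessionBehaviorDelete}
+//	id, _, err := client.Session().Create(se, nil)
+//	if err != nil {
+//		return err
+//	}
+//	doneCh := make(chan struct{})
+//	go client.Session().RenewPeriodic("15s", id, nil, doneCh)
+//	// ... use the session ...
+//	close(doneCh) // stops the renewal loop and destroys the session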
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 000000000..e902377dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+	"io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+	c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+	return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/snapshot")
+	r.setQueryOptions(q)
+
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+	return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
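+//
+// A minimal round-trip sketch (client is assumed to be an already-configured
+// *Client; a real caller would typically persist the stream to disk between
+// the two steps):
+//
+//	snap, _, err := client.Snapshot().Save(nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer snap.Close()
+//	if err := client.Snapshot().Restore(nil, snap); err != nil {
+//		return err
+//	}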
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+	r := s.c.newRequest("PUT", "/v1/snapshot")
+	r.body = in
+	r.setWriteOptions(q)
+	_, _, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 000000000..74ef61a67
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+   means
+
+   a. that the initial Contributor has attached the notice described in
+      Exhibit B to the Covered Software; or
+
+   b. that the Covered Software was made available under the terms of version
+      1.1 or earlier of the License, but not also under the terms of a
+      Secondary License.
+
+1.6. “Executable Form”
+
+   means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+   means a work that combines Covered Software with other material, in a separate
+   file or files, that is not Covered Software.
+
+1.8. “License”
+
+   means this document.
+
+1.9. “Licensable”
+
+   means having the right to grant, to the maximum extent possible, whether at the
+   time of the initial grant or subsequently, any and all of the rights conveyed by
+   this License.
+
+1.10. “Modifications”
+
+   means any of the following:
+
+   a. any file in Source Code Form that results from an addition to, deletion
+      from, or modification of the contents of Covered Software; or
+
+   b. any new file in Source Code Form that contains any Covered Software.
+
+1.11.
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 000000000..1c95f5978
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that can happen, when you might just care about one error.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..a733bef18
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors.
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. 
It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..f4596d80c --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,53 @@ +package cleanhttp + +import ( + "net" + "net/http" + "time" +) + +// DefaultTransport returns a new http.Transport with the same default values +// as http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: false, + MaxIdleConnsPerHost: 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with the same default values +// as http.Client, but with a shared Transport. Do not use this function +// for transient clients as it can leak file descriptors over time. Only use +// this for clients that will be re-used for the same host(s). 
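+//
+// A minimal usage sketch (the URL is illustrative only):
+//
+//	client := cleanhttp.DefaultPooledClient()
+//	resp, err := client.Get("https://example.com/")
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()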
+func DefaultPooledClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultPooledTransport(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 000000000..05841092a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 000000000..82b4de97c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+   means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+   means a work that combines Covered Software with other material, in a separate
+   file or files, that is not Covered Software.
+
+1.8. “License”
+
+   means this document.
+
+1.9. “Licensable”
+
+   means having the right to grant, to the maximum extent possible, whether at the
+   time of the initial grant or subsequently, any and all of the rights conveyed by
+   this License.
+
+1.10. “Modifications”
+
+   means any of the following:
+
+   a. any file in Source Code Form that results from an addition to, deletion
+      from, or modification of the contents of Covered Software; or
+
+   b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000..e81be50e0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,91 @@ +# go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..00afa9b35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,37 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. 
If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Go through each error and flatten
+		for _, e := range errs {
+			switch e := e.(type) {
+			case *Error:
+				err.Errors = append(err.Errors, e.Errors...)
+			default:
+				err.Errors = append(err.Errors, e)
+			}
+		}
+
+		return err
+	default:
+		newErrs := make([]error, 0, len(errs)+1)
+		if err != nil {
+			newErrs = append(newErrs, err)
+		}
+		newErrs = append(newErrs, errs...)
+
+		return Append(&Error{}, newErrs...)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 000000000..aab8e9abe
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+	// If it isn't an *Error, just return the error as-is
+	if _, ok := err.(*Error); !ok {
+		return err
+	}
+
+	// Otherwise, make the result and flatten away!
+	flatErr := new(Error)
+	flatten(err, flatErr)
+	return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+	switch err := err.(type) {
+	case *Error:
+		for _, e := range err.Errors {
+			flatten(e, flatErr)
+		}
+	default:
+		flatErr.Errors = append(flatErr.Errors, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 000000000..bb65a12e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,23 @@
+package multierror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d error(s) occurred:\n\n%s",
+		len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 000000000..2ea082732
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+	"fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors in cases and return them as a single "error".
+type Error struct {
+	Errors      []error
+	ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+	fn := e.ErrorFormat
+	if fn == nil {
+		fn = ListFormatFunc
+	}
+
+	return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors. 
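+//
+// A common pattern (see the README above) is to accumulate errors with
+// Append into a *multierror.Error and then return result.ErrorOrNil(),
+// so that callers receive a plain nil when nothing failed.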
+func (e *Error) ErrorOrNil() error {
+	if e == nil {
+		return nil
+	}
+	if len(e.Errors) == 0 {
+		return nil
+	}
+
+	return e
+}
+
+func (e *Error) GoString() string {
+	return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+	return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 000000000..5c477abe4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful to use when appending multiple multierrors
+// together in order to give better scoping.
+func Prefix(err error, prefix string) error {
+	if err == nil {
+		return nil
+	}
+
+	format := fmt.Sprintf("%s {{err}}", prefix)
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Wrap each of the errors
+		for i, e := range err.Errors {
+			err.Errors[i] = errwrap.Wrapf(format, e)
+		}
+
+		return err
+	default:
+		return errwrap.Wrapf(format, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore
new file mode 100644
index 000000000..caab963a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore
@@ -0,0 +1,3 @@
+.idea/
+*.iml
+*.test
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
new file mode 100644
index 000000000..54a6c7a22
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.6.3
+
+branches:
+  only:
+    - master
+
+script: make updatedeps test
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
new file mode 100644
index 000000000..e87a115e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. 
that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
new file mode 100644
index 000000000..da17640e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -0,0 +1,11 @@
+default: test
+
+test:
+	go vet ./...
+	go test -race ./...
+
+updatedeps:
+	go get -f -t -u ./...
+	go get -f -u ./...
+
+.PHONY: default test updatedeps
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
new file mode 100644
index 000000000..0d6f9ed40
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -0,0 +1,43 @@
+go-retryablehttp
+================
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
+[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
+
+The `retryablehttp` package provides a familiar HTTP client interface with
+automatic retries and exponential backoff. It is a thin wrapper over the
+standard `net/http` client library and exposes nearly the same public API. This
+makes `retryablehttp` very easy to drop into existing programs.
+
+`retryablehttp` performs automatic retries under certain conditions. Mainly, if
+an error is returned by the client (connection errors, etc.), or if a 500-range
+response code is received, then a retry is invoked after a wait period.
+Otherwise, the response is returned and left to the caller to interpret.
+
+The main difference from `net/http` is that requests which take a request body
+(POST/PUT et al.) require an `io.ReadSeeker` to be provided. This enables the
+request body to be "rewound" if the initial request fails so that the full
+request can be attempted again.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+    panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. If the request fails one or more times, the above
+call blocks and retries with exponential backoff.
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
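+
+For more control, a client can be constructed explicitly. The sketch below is
+not part of the upstream README; it relies only on the exported fields and
+methods defined in `client.go` (shown next), and the URL and request body are
+placeholders:
+
+```go
+client := retryablehttp.NewClient()
+client.RetryMax = 5                    // give up after 5 retries
+client.RetryWaitMax = 30 * time.Second // cap the exponential backoff
+
+// Request bodies must be io.ReadSeekers so they can be rewound before
+// each retry; strings.NewReader satisfies that interface.
+resp, err := client.Post("http://127.0.0.1:8080/foo",
+    "application/json", strings.NewReader(`{"hello":"world"}`))
+if err != nil {
+    panic(err)
+}
+defer resp.Body.Close()
+```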
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 000000000..d0ec6b2ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,302 @@
+// The retryablehttp package provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
+//
+// retryablehttp performs automatic retries under certain conditions. Mainly, if
+// an error is returned by the client (connection errors, etc.), or if a 500-range
+// response is received, then a retry is invoked. Otherwise, the response is
+// returned and left to the caller to interpret.
+//
+// The main difference from net/http is that requests which take a request body
+// (POST/PUT et al.) require an io.ReadSeeker to be provided. This enables the
+// request body to be "rewound" if the initial request fails so that the full
+// request can be attempted again.
+package retryablehttp
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+var (
+	// Default retry configuration
+	defaultRetryWaitMin = 1 * time.Second
+	defaultRetryWaitMax = 5 * time.Minute
+	defaultRetryMax     = 32
+
+	// defaultClient is used for performing requests without explicitly making
+	// a new client. It is purposely private to avoid modifications.
+	defaultClient = NewClient()
+
+	// We need to consume response bodies to maintain http connections, but
+	// limit the size we consume to respReadLimit.
+	respReadLimit = int64(4096)
+)
+
+// LenReader is an interface implemented by many in-memory io.Readers. Used
+// for automatically sending the right Content-Length header when possible.
+type LenReader interface {
+	Len() int
+}
+
+// Request wraps the metadata needed to create HTTP requests.
+type Request struct {
+	// body is a seekable reader over the request body payload. This is
+	// used to rewind the request data in between retries.
+	body io.ReadSeeker
+
+	// Embed an HTTP request directly. This makes a *Request act exactly
+	// like an *http.Request so that all meta methods are supported.
+	*http.Request
+}
+
+// NewRequest creates a new wrapped request.
+func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
+	// Wrap the body in a noop ReadCloser if non-nil. This prevents the
+	// reader from being closed by the HTTP client.
+	var rcBody io.ReadCloser
+	if body != nil {
+		rcBody = ioutil.NopCloser(body)
+	}
+
+	// Make the request with the noop-closer for the body.
+	httpReq, err := http.NewRequest(method, url, rcBody)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we can set the Content-Length automatically.
+	if lr, ok := body.(LenReader); ok {
+		httpReq.ContentLength = int64(lr.Len())
+	}
+
+	return &Request{body, httpReq}, nil
+}
+
+// RequestLogHook allows a function to run before each retry. The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(*log.Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. 
This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(*log.Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckRetry callback to properly close any
+// response body before returning.
+type CheckRetry func(resp *http.Response, err error) (bool, error)
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+	HTTPClient *http.Client // Internal HTTP client.
+	Logger     *log.Logger  // Custom logger instance.
+
+	RetryWaitMin time.Duration // Minimum time to wait
+	RetryWaitMax time.Duration // Maximum time to wait
+	RetryMax     int           // Maximum number of retries
+
+	// RequestLogHook allows a user-supplied function to be called
+	// before each retry.
+	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
+}
+
+// NewClient creates a new Client with default settings.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient:   cleanhttp.DefaultClient(),
+		Logger:       log.New(os.Stderr, "", log.LstdFlags),
+		RetryWaitMin: defaultRetryWaitMin,
+		RetryWaitMax: defaultRetryWaitMax,
+		RetryMax:     defaultRetryMax,
+		CheckRetry:   DefaultRetryPolicy,
+	}
+}
+
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
+	if err != nil {
+		return true, err
+	}
+	// Check the response code. We retry on 500-range responses to allow
+	// the server time to recover, as 500's are typically not permanent
+	// errors and may relate to outages on the server side. This will catch
+	// invalid response codes as well, like 0 and 999.
+	if resp.StatusCode == 0 || resp.StatusCode >= 500 {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// Do wraps calling an HTTP method with retries.
+func (c *Client) Do(req *Request) (*http.Response, error) {
+	c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
+
+	for i := 0; ; i++ {
+		var code int // HTTP response code
+
+		// Always rewind the request body when non-nil.
+		if req.body != nil {
+			if _, err := req.body.Seek(0, 0); err != nil {
+				return nil, fmt.Errorf("failed to seek body: %v", err)
+			}
+		}
+
+		if c.RequestLogHook != nil {
+			c.RequestLogHook(c.Logger, req.Request, i)
+		}
+
+		// Attempt the request
+		resp, err := c.HTTPClient.Do(req.Request)
+		if resp != nil {
+			// Record the status code so the retry log line below can
+			// report it (previously, code was declared but never set).
+			code = resp.StatusCode
+		}
+
+		// Check if we should continue with retries. 
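+		// Note: the retry policy receives both the raw response and the
+		// transport error. If the policy returns its own error (checkErr),
+		// that error replaces err once the loop stops retrying (see the
+		// !checkOK branch below).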
+ checkOK, checkErr := c.CheckRetry(resp, err) + + if err != nil { + c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + c.ResponseLogHook(c.Logger, resp) + } + } + + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + return resp, err + } + + // We're going to retry, consume any response to reuse the connection. + if err == nil { + c.drainBody(resp.Body) + } + + remain := c.RetryMax - i + if remain == 0 { + break + } + wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i) + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if code > 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + time.Sleep(wait) + } + + // Return an error if we fall out of the retry loop + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + c.Logger.Printf("[ERR] error reading response body: %v", err) + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// backoff is used to calculate how long to sleep before retrying +// after observing failures. It takes the minimum/maximum wait time and +// iteration, and returns the duration to wait. 
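+// For example, with min = 1s the wait is 1s, 2s, 4s, 8s, ... for
+// iter = 0, 1, 2, 3, ..., clamped to max; the float64 comparison in the
+// body also guards against overflow for very large iteration counts.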
+func backoff(min, max time.Duration, iter int) time.Duration { + mult := math.Pow(2, float64(iter)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml new file mode 100644 index 000000000..80e1de44e --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile new file mode 100644 index 000000000..c3989e789 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile @@ -0,0 +1,8 @@ +TEST?=./... + +test: + go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 + go vet $(TEST) + go test $(TEST) -race + +.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md new file mode 100644 index 000000000..f5abffc29 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/README.md @@ -0,0 +1,43 @@ +# rootcerts + +Functions for loading root certificates for TLS connections. + +----- + +Go's standard library `crypto/tls` provides a common mechanism for configuring +TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool +of certificates for the client to use as a trust store when verifying server +certificates. 
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set. This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+	tlsConfig := &tls.Config{}
+	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+		CAFile: os.Getenv("MYAPP_CAFILE"),
+		CAPath: os.Getenv("MYAPP_CAPATH"),
+	})
+	if err != nil {
+		return nil, err
+	}
+	c := cleanhttp.DefaultClient()
+	t := cleanhttp.DefaultTransport()
+	t.TLSClientConfig = tlsConfig
+	c.Transport = t
+	return c, nil
+}
+```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 000000000..b55cc6284
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 000000000..aeb30ece3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,103 @@
+package rootcerts
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When both
+// CAFile and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+	// precedence over CAPath.
+	CAFile string
+
+	// CAPath is a path to a directory populated with PEM-encoded certificates.
+	CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+	if t == nil {
+		return nil
+	}
+	pool, err := LoadCACerts(c)
+	if err != nil {
+		return err
+	}
+	t.RootCAs = pool
+	return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+	if c == nil {
+		c = &Config{}
+	}
+	if c.CAFile != "" {
+		return LoadCAFile(c.CAFile)
+	}
+	if c.CAPath != "" {
+		return LoadCAPath(c.CAPath)
+	}
+
+	return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading CA File: %s", err)
+	}
+
+	ok := pool.AppendCertsFromPEM(pem)
+	if !ok {
+		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+	}
+
+	return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered
+// into a pool.
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		pem, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("Error loading file from CAPath: %s", err)
+		}
+
+		ok := pool.AppendCertsFromPEM(pem)
+		if !ok {
+			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+		}
+
+		return nil
+	}
+
+	err := filepath.Walk(caPath, walkFn)
+	if err != nil {
+		return nil, err
+	}
+
+	return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 000000000..66b1472c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 000000000..a9a040657
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+	"crypto/x509"
+	"os/exec"
+	"path"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin systems: it loads certificates
+// from the system keychains itself, to work around the crypto/x509 keychain
+// issue referenced in doc.go.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	for _, keychain := range certKeychains() {
+		err := addCertsFromKeychain(pool, keychain)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+	data, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+
+	pool.AppendCertsFromPEM(data)
+
+	return nil
+}
+
+func certKeychains() []string {
+	keychains := []string{
+		"/System/Library/Keychains/SystemRootCertificates.keychain",
+		"/Library/Keychains/System.keychain",
+	}
+	home, err := homedir.Dir()
+	if err == nil {
+		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+		keychains = append(keychains, loginKeychain)
+	}
+	return keychains
+}
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 000000000..15586a2b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 000000000..83dc540ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,3 @@
+sudo: false
+language: go +go: 1.5 diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a
+  contract shall be construed against the drafter shall not be used to construe
+  this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 000000000..ad404a811
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,17 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 000000000..3d5b8bd92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,104 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
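+
+For example (an illustrative pair, not an excerpt from the HCL documentation;
+any equivalent snippet would do), the same configuration can be written in
+either form. This HCL:
+
+```hcl
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+
+is equivalent to this JSON:
+
+```json
+{
+  "variable": {
+    "ami": {
+      "description": "the AMI to use"
+    }
+  }
+}
+```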
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when viewing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn some set of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be any primitive: a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line,
+    and end with `EOF` on its own line.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..02888d2ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,654 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode.
It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. 
+	// Very useful in situations where we want to preserve ast.Node information
+	// like Pos
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For testing types, ObjectType should just be treated as a list. We
+	// set this to a temporary var because we want to pass in the real node.
+	testNode := node
+	if ot, ok := node.(*ast.ObjectType); ok {
+		testNode = ot.List
+	}
+
+	switch n := testNode.(type) {
+	case *ast.ObjectList:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the map itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+			return nil
+		case token.STRING, token.HEREDOC:
+			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+	}
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+	var item *ast.ObjectItem
+	if it, ok := node.(*ast.ObjectItem); ok {
+		item = it
+		node = it.Val
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	list, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+		}
+	}
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = result
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. 
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 000000000..575a20b50 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..f8bb71a04 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,211 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. 
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+	// keys is only one length long if it's of type assignment. If it's a
+	// nested object it can be larger than one. In that case "assign" is
+	// invalid as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+	// string. If key length is larger than one, val can only be of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey represents a key, which is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// associated line comment, only when used in a list
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// node returned by fn can be used to rewrite the node that was passed to it.
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	fn(nil)
+	return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 000000000..5c99381df
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
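+//
+// For example (hypothetical), an error wrapped at line 2, column 1 of
+// "config.hcl" renders via Error() as:
+//
+//	At config.hcl:2:1: unexpected token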
+type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..cc129b6c7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,454 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. 
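+		// For example (hypothetical illustration), input consisting of just
+		// `foo`, a key at EOF with no value, reaches this branch.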
+ err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + // object + return keys, nil + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + fmt.Println("illegal") + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
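+//
+// For example (a hypothetical illustration), each assignment below takes a
+// different branch of the switch:
+//
+//	a = 10        // literalType (NUMBER)
+//	b = "x"       // literalType (STRING)
+//	c = { d = 1 } // objectType
+//	e = [1, 2]    // listType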
+func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // If there is no error, we should be at a RBRACE to end the object + if p.tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + if needComma { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA), + } + } + + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. 
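+	// For example (hypothetical illustration), in
+	//
+	//	a = 1 // becomes a line comment attached to `a`
+	//	// becomes a lead comment attached to `b`
+	//	b = 2
+	//
+	// the logic below tells the two apart by comparing line numbers.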
+ prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. + comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 000000000..a3f34a7b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,629 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. 
+ tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
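+//
+// A typical scanning loop looks like this (hypothetical usage):
+//
+//	s := scanner.New(src)
+//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//		// inspect tok.Type, tok.Pos and tok.Text
+//	}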
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position, initial next() is moving the offset by one (size of rune
+	// actually), though we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		tok = token.IDENT
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '#', '/':
+			tok = token.COMMENT
+			s.scanComment(ch)
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '<':
+			tok = token.HEREDOC
+			s.scanHeredoc()
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case '=':
+			tok = token.ASSIGN
+		case '+':
+			tok = token.ADD
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				tok = token.SUB
+			}
+		default:
+			s.err("illegal char")
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+func (s *Scanner) scanComment(ch rune) {
+	// single line comments
+	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		ch = s.next()
+		for ch != '\n' && ch >= 0 && ch != eof {
+			ch = s.next()
+		}
+		if ch != eof && ch >= 0 {
+			s.unread()
+		}
+		return
+	}
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+	if ch == '/' {
+		s.next()
+		ch = s.next() // read character after "/*"
+	}
+
+	// look for /* - style comments
+	for {
+		if ch < 0 || ch == eof {
+			s.err("comment not terminated")
+			break
+		}
+
+		ch0 := ch
+		ch = s.next()
+		if ch0 == '*' && ch == '/' {
+			break
+		}
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	if ch == '0' {
+		// check for hexadecimal, octal or float
+		ch = s.next()
+		if ch == 'x' || ch == 'X' {
+			// hexadecimal
+			ch = s.next()
+			found := false
+			for isHexadecimal(ch) {
+				ch = s.next()
+				found = true
+			}
+
+			if !found {
+				s.err("illegal hexadecimal number")
+			}
+
+			if ch != eof {
+				s.unread()
+			}
+
+			return token.NUMBER
+		}
+
+		// now it's either something like: 0421 (octal) or 0.1231 (float)
+		illegalOctal := false
+		for isDecimal(ch) {
+			ch = s.next()
+			if ch == '8' || ch == '9' {
+				// this is just a possibility. For example 0159 is illegal, but
+				// 0159.23 is valid. So we mark a possible illegal octal. If
+				// the next character is not a period, we'll print the error.
+				illegalOctal = true
+			}
+		}
+
+		if ch == 'e' || ch == 'E' {
+			ch = s.scanExponent(ch)
+			return token.FLOAT
+		}
+
+		if ch == '.' {
+			ch = s.scanFraction(ch)
+
+			if ch == 'e' || ch == 'E' {
+				ch = s.next()
+				ch = s.scanExponent(ch)
+			}
+			return token.FLOAT
+		}
+
+		if illegalOctal {
+			s.err("illegal octal number")
+		}
+
+		if ch != eof {
+			s.unread()
+		}
+		return token.NUMBER
+	}
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// CRLF
+		if ch == '\n' {
+			// Great, check if we're done
+			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for awhile
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example, an
+// octal notation \184 would result in a call to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err prints the error of any scanning to the s.Error function. If the
+// function is not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal number
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 000000000..6cfd8c3a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,248 @@
+package strconv
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section, except for escaped quotes and escaped backslashes, which we + // handle specifically. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + // We special case escaped double quotes and escaped backslashes in + // interpolations, converting them to their unescaped equivalents. + if r == '\\' { + q, _ := utf8.DecodeRuneInString(s) + switch q { + case '"', '\\': + continue + } + } + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
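+// For example (hypothetical), position 3:1 is after 2:10 within the
+// same file.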
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..6e9949804 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,214 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 000000000..65d56c9b8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,297 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // Done + return keys, nil + case token.ILLEGAL: + fmt.Println("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? 
Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 000000000..477f71ff3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given 
rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example, an
+// octal notation \184 would result in a call to scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err prints the error of any scanning to the s.Error function. If the
+// function is not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal number
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 000000000..95a0c3eee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 000000000..d9993c292
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 000000000..1fca53c4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hclParser "github.com/hashicorp/hcl/hcl/parser"
+	jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// The input can be either JSON or HCL.
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/README.md b/vendor/github.com/hashicorp/serf/coordinate/README.md new file mode 100644 index 000000000..0a96fd3eb --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/README.md @@ -0,0 +1 @@ +# TODO - I'll beef this up as I implement each of the enhancements. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 000000000..613bfff89 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,180 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// NewClient creates a new Client and verifies the configuration is valid. +func NewClient(config *Config) (*Client, error) { + if !(config.Dimensionality > 0) { + return nil, fmt.Errorf("dimensionality must be >0") + } + + return &Client{ + coord: NewCoordinate(config), + origin: NewCoordinate(config), + config: config, + adjustmentIndex: 0, + adjustmentSamples: make([]float64, config.AdjustmentWindowSize), + latencyFilterSamples: make(map[string][]float64), + }, nil +} + +// GetCoordinate returns a copy of the coordinate for this client. 
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	c.coord = coord.Clone()
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure in to this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
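+
+// exampleClientSketch is an editor's illustration and not part of upstream
+// serf. It assumes DefaultConfig from config.go in this package and shows the
+// intended flow through Update and DistanceTo (defined just below): feed RTT
+// observations for another node, then read back the estimated distance.
+func exampleClientSketch() error {
+	a, err := NewClient(DefaultConfig())
+	if err != nil {
+		return err
+	}
+	b, err := NewClient(DefaultConfig())
+	if err != nil {
+		return err
+	}
+	// Observe a 10ms round trip to node "b" several times; each call folds
+	// the sample into the latency filter and nudges a's coordinate.
+	for i := 0; i < 10; i++ {
+		a.Update("b", b.GetCoordinate(), 10*time.Millisecond)
+	}
+	// The estimate should now be in the neighborhood of the observed RTT.
+	fmt.Println(a.DistanceTo(b.GetCoordinate()))
+	return nil
+}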
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	rttSeconds := c.latencyFilter(node, rtt.Seconds())
+	c.updateVivaldi(other, rttSeconds)
+	c.updateAdjustment(other, rttSeconds)
+	c.updateGravity()
+	return c.coord.Clone()
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 000000000..a5b3aadfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improve the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations. It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since our time to probe any given node is
+	// pretty infrequent. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how strongly gravity pulls
+	// coordinates back toward the origin to combat drift. See [2] for more
+	// details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 000000000..c9194e048
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,183 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is panicked if you try to perform operations
+// on coordinates with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+	return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. +func add(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i, _ := range ret { + ret[i] = vec1[i] + vec2[i] + } + return ret +} + +// diff returns the difference between the vec1 and vec2. This assumes the +// dimensions have already been checked to be compatible. +func diff(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i, _ := range ret { + ret[i] = vec1[i] - vec2[i] + } + return ret +} + +// mul returns vec multiplied by a scalar factor. +func mul(vec []float64, factor float64) []float64 { + ret := make([]float64, len(vec)) + for i, _ := range vec { + ret[i] = vec[i] * factor + } + return ret +} + +// magnitude computes the magnitude of the vec. +func magnitude(vec []float64) float64 { + sum := 0.0 + for i, _ := range vec { + sum += vec[i] * vec[i] + } + return math.Sqrt(sum) +} + +// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two +// positions are the same then a random unit vector is returned. We also return +// the distance between the points for use in the later height calculation. +func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { + ret := diff(vec1, vec2) + + // If the coordinates aren't on top of each other we can normalize. + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), mag + } + + // Otherwise, just return a random unit vector. + for i, _ := range ret { + ret[i] = rand.Float64() - 0.5 + } + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), 0.0 + } + + // And finally just give up and make a unit vector along the first + // dimension. This should be exceedingly rare. 
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 000000000..6fb033c0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i, _ := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
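+
+// exampleSimulationSketch is an editor's illustration and not part of upstream
+// serf. It assumes only the helpers in this file plus Simulate and Evaluate
+// defined further below, and shows the intended test flow: generate clients
+// and a truth matrix, run the simulation, then score the resulting estimates.
+func exampleSimulationSketch() error {
+	clients, err := GenerateClients(10, DefaultConfig())
+	if err != nil {
+		return err
+	}
+	// A line topology with 10ms spacing between adjacent nodes.
+	truth := GenerateLine(10, 10*time.Millisecond)
+	Simulate(clients, truth, 1000)
+	stats := Evaluate(clients, truth)
+	fmt.Printf("avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+	return nil
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.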
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). +func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. +func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/test_util.go b/vendor/github.com/hashicorp/serf/coordinate/test_util.go new file mode 100644 index 000000000..116e94933 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/test_util.go @@ -0,0 +1,27 @@ +package coordinate + +import ( + "math" + "testing" +) + +// verifyEqualFloats will compare f1 and f2 and fail if they are not +// "equal" within a threshold. 
+func verifyEqualFloats(t *testing.T, f1 float64, f2 float64) { + const zeroThreshold = 1.0e-6 + if math.Abs(f1-f2) > zeroThreshold { + t.Fatalf("equal assertion fail, %9.6f != %9.6f", f1, f2) + } +} + +// verifyEqualVectors will compare vec1 and vec2 and fail if they are not +// "equal" within a threshold. +func verifyEqualVectors(t *testing.T, vec1 []float64, vec2 []float64) { + if len(vec1) != len(vec2) { + t.Fatalf("vector length mismatch, %d != %d", len(vec1), len(vec2)) + } + + for i, _ := range vec1 { + verifyEqualFloats(t, vec1[i], vec2[i]) + } +} diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/vault/api/SPEC.md b/vendor/github.com/hashicorp/vault/api/SPEC.md
new file mode 100644
index 000000000..15345f390
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/SPEC.md
@@ -0,0 +1,611 @@
+FORMAT: 1A
+
+# vault
+
+The Vault API gives you full access to the Vault project.
+
+If you're browsing this API specification in GitHub or in raw
+format, please excuse some of the odd formatting. This document
+is in api-blueprint format that can be read by viewers such as
+Apiary.
+
+## Sealed vs. Unsealed
+
+Whenever an individual Vault server is started, it is started
+in the _sealed_ state. In this state, it knows where its data
+is located, but the data is encrypted and Vault doesn't have the
+encryption keys to access it. Before Vault can operate, it must
+be _unsealed_.
+
+**Note:** Sealing/unsealing has no relationship to _authentication_,
+which is separate and still required once the Vault is unsealed.
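+
+As a concrete illustration, the seal state can be inspected
+programmatically. The sketch below is illustrative only: it uses the Go
+client vendored under `api/` in this patch, assumes the full package's
+`Sys().SealStatus()` helper (not part of this excerpt), and a server
+reachable via `VAULT_ADDR`.
+
+    package main
+
+    import (
+        "fmt"
+        "log"
+
+        "github.com/hashicorp/vault/api"
+    )
+
+    func main() {
+        // DefaultConfig honors VAULT_ADDR (see client.go in this patch).
+        client, err := api.NewClient(api.DefaultConfig())
+        if err != nil {
+            log.Fatal(err)
+        }
+        status, err := client.Sys().SealStatus() // assumed helper, not in this excerpt
+        if err != nil {
+            log.Fatal(err)
+        }
+        // A sealed Vault accepts essentially no operation other than
+        // unsealing, so callers typically gate on this first.
+        fmt.Printf("sealed=%v progress=%d/%d\n",
+            status.Sealed, status.Progress, status.T)
+    }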
+
+Instead of being sealed with a single key, we utilize
+[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
+to shard a key into _n_ parts such that _t_ parts are required
+to reconstruct the original key, where `t <= n`. This means that
+Vault itself doesn't know the original key, and no single person
+has the original key (unless `n = 1`, or `t` parts are given to
+a single person).
+
+Unsealing is done via an unauthenticated
+[unseal API](#reference/seal/unseal/unseal). This API takes a single
+master shard and progresses the unsealing process. Once all shards
+are given, the Vault is either unsealed or resets the unsealing
+process if the key was invalid.
+
+The entire seal/unseal state is server-wide. This allows multiple
+distinct operators to use the unseal API (or more likely the
+`vault unseal` command) from separate computers/networks and never
+have to transmit their key in order to unseal the vault in a
+distributed fashion.
+
+## Transport
+
+The API is expected to be accessed over a TLS connection at
+all times, with a valid certificate that is verified by a
+well-behaved client.
+
+## Authentication
+
+Once the Vault is unsealed, every other operation requires
+authentication. There are multiple methods for authentication
+that can be enabled (see
+[authentication](#reference/authentication)).
+
+Authentication is done with the login endpoint. The login endpoint
+returns an access token that is set as the `X-Vault-Token` header.
+
+## Help
+
+To retrieve the help for any API within Vault, including mounted
+backends, credential providers, etc., append `?help=1` to any
+URL. If you have valid permission to access the path, the help text
+will be returned with the following structure:
+
+    {
+        "help": "help text"
+    }
+
+## Error Response
+
+A common JSON structure is always used to return errors:
+
+    {
+        "errors": [
+            "message",
+            "another message"
+        ]
+    }
+
+This structure will be sent down for any non-2xx HTTP status.
+
+## HTTP Status Codes
+
+The following HTTP status codes are used throughout the API.
+
+- `200` - Success with data.
+- `204` - Success, no data returned.
+- `400` - Invalid request, missing or invalid data.
+- `403` - Forbidden, your authentication details are either
+  incorrect or you don't have access to this feature.
+- `404` - Invalid path. This can both mean that the path truly
+  doesn't exist or that you don't have permission to view a
+  specific path. We use 404 in some cases to avoid state leakage.
+- `429` - Rate limit exceeded. Try again after waiting some period
+  of time.
+- `500` - Internal server error. An internal error has occurred,
+  try again later. If the error persists, report a bug.
+- `503` - Vault is down for maintenance or is currently sealed.
+  Try again later.
+
+# Group Initialization
+
+## Initialization [/sys/init]
+### Initialization Status [GET]
+Returns the status of whether the vault is initialized or not. The
+vault doesn't have to be unsealed for this operation.
+
++ Response 200 (application/json)
+
+        {
+            "initialized": true
+        }
+
+### Initialize [POST]
+Initialize the vault. This is an unauthenticated request to initially
+set up a new vault. Although this is unauthenticated, it is still safe:
+data cannot be in vault prior to initialization, and any future
+authentication will fail if you didn't initialize it yourself.
+Additionally, once initialized, a vault cannot be reinitialized.
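+
+For illustration, the same call made through the Go client vendored in
+this patch might look like the sketch below; it relies on the
+`Sys().Init` helper and `InitRequest` type from `sys_init.go` (included
+later in this diff) and assumes a freshly started, uninitialized server:
+
+    package main
+
+    import (
+        "fmt"
+        "log"
+
+        "github.com/hashicorp/vault/api"
+    )
+
+    func main() {
+        client, err := api.NewClient(api.DefaultConfig())
+        if err != nil {
+            log.Fatal(err)
+        }
+        // Ask for 5 key shards, any 3 of which reconstruct the master key.
+        resp, err := client.Sys().Init(&api.InitRequest{
+            SecretShares:    5,
+            SecretThreshold: 3,
+        })
+        if err != nil {
+            log.Fatal(err)
+        }
+        // resp.Keys and resp.RootToken are returned exactly once;
+        // store them safely rather than printing them in real use.
+        fmt.Println(len(resp.Keys), resp.RootToken)
+    }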
+
+This API is the only time Vault will ever be aware of your keys, and
+the only time the keys will ever be returned in one unit. Care should
+be taken to ensure that the output of this request is never logged,
+and that the keys are properly distributed.
+
+The response also contains the initial root token that can be used
+as authentication in order to initially configure Vault once it is
+unsealed. Just as with the unseal keys, this is the only time Vault is
+ever aware of this token.
+
++ Request (application/json)
+
+        {
+            "secret_shares": 5,
+            "secret_threshold": 3
+        }
+
++ Response 200 (application/json)
+
+        {
+            "keys": ["one", "two", "three"],
+            "root_token": "foo"
+        }
+
+# Group Seal/Unseal
+
+## Seal Status [/sys/seal-status]
+### Seal Status [GET]
+Returns the status of whether the vault is currently
+sealed or not, as well as the progress of unsealing.
+
+The response has the following attributes:
+
+- sealed (boolean) - If true, the vault is sealed. Otherwise,
+  it is unsealed.
+- t (int) - The "t" value for the master key, or the number
+  of shards needed in total to unseal the vault.
+- n (int) - The "n" value for the master key, or the total
+  number of shards of the key distributed.
+- progress (int) - The number of master key shards that have
+  been entered so far towards unsealing the vault.
+
++ Response 200 (application/json)
+
+        {
+            "sealed": true,
+            "t": 3,
+            "n": 5,
+            "progress": 1
+        }
+
+## Seal [/sys/seal]
+### Seal [PUT]
+Seal the vault.
+
+Sealing the vault locks Vault from any future operations on any
+secrets or system configuration until the vault is once again
+unsealed. Internally, sealing throws away the keys to access the
+encrypted vault data, so Vault is unable to access the data without
+unsealing to get the encryption keys.
+
++ Response 204
+
+## Unseal [/sys/unseal]
+### Unseal [PUT]
+Unseal the vault.
+
+Unseal the vault by entering a portion of the master key. The
+response object will tell you if the unseal is complete or
+only partial.
+
+If the vault is already unsealed, this does nothing. It is
+not an error; the return value simply says the vault is unsealed.
+Due to the architecture of Vault, we cannot validate whether
+any portion of the unseal key given is valid until all keys
+have been entered; therefore, unsealing an already unsealed vault
+is still a success even if the input key is invalid.
+
++ Request (application/json)
+
+        {
+            "key": "value"
+        }
+
++ Response 200 (application/json)
+
+        {
+            "sealed": true,
+            "t": 3,
+            "n": 5,
+            "progress": 1
+        }
+
+# Group Authentication
+
+## List Auth Methods [/sys/auth]
+### List all auth methods [GET]
+Lists all available authentication methods.
+
+This returns the name of the authentication method as well as
+a human-friendly long-form help text for the method that can be
+shown to the user as documentation.
+
++ Response 200 (application/json)
+
+        {
+            "token": {
+                "type": "token",
+                "description": "Token authentication"
+            },
+            "oauth": {
+                "type": "oauth",
+                "description": "OAuth authentication"
+            }
+        }
+
+## Single Auth Method [/sys/auth/{id}]
+
++ Parameters
+    + id (required, string) ... The ID of the auth method.
+
+### Enable an auth method [PUT]
+Enables an authentication method.
+
+The body of the request depends on the authentication method
+being used. Please reference the documentation for the specific
+authentication method you're enabling in order to determine what
+parameters you must give it.
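+
+As a sketch, enabling a method through the Go client vendored in this
+patch could look like the following; it uses the `Sys().EnableAuth`
+helper from `sys_auth.go` (later in this diff) and assumes the client's
+token, picked up from `VAULT_TOKEN`, carries sufficient privileges:
+
+    package main
+
+    import (
+        "log"
+
+        "github.com/hashicorp/vault/api"
+    )
+
+    func main() {
+        // NewClient reads VAULT_TOKEN automatically (see client.go).
+        client, err := api.NewClient(api.DefaultConfig())
+        if err != nil {
+            log.Fatal(err)
+        }
+        // Mounts the GitHub credential backend at auth/github.
+        if err := client.Sys().EnableAuth("github", "github", "GitHub auth"); err != nil {
+            log.Fatal(err)
+        }
+    }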
+ +If an authentication method is already enabled, then this can be +used to change the configuration, including even the type of +the configuration. + ++ Request (application/json) + + { + "type": "type", + "key": "value", + "key2": "value2" + } + ++ Response 204 + +### Disable an auth method [DELETE] +Disables an authentication method. Previously authenticated sessions +are immediately invalidated. + ++ Response 204 + +# Group Policies + +Policies are named permission sets that identities returned by +credential stores are bound to. This separates _authentication_ +from _authorization_. + +## Policies [/sys/policy] +### List all Policies [GET] + +List all the policies. + ++ Response 200 (application/json) + + { + "policies": ["root"] + } + +## Single Policy [/sys/policy/{id}] + ++ Parameters + + id (required, string) ... The name of the policy + +### Upsert [PUT] + +Create or update a policy with the given ID. + ++ Request (application/json) + + { + "rules": "HCL" + } + ++ Response 204 + +### Delete [DELETE] + +Delete a policy with the given ID. Any identities bound to this +policy will immediately become "deny all" despite already being +authenticated. + ++ Response 204 + +# Group Mounts + +Logical backends are mounted at _mount points_, similar to +filesystems. This allows you to mount the "aws" logical backend +at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo` +for example. This enables multiple logical backends to be enabled. + +## Mounts [/sys/mounts] +### List all mounts [GET] + +Lists all the active mount points. + ++ Response 200 (application/json) + + { + "aws": { + "type": "aws", + "description": "AWS" + }, + "pg": { + "type": "postgresql", + "description": "PostgreSQL dynamic users" + } + } + +## Single Mount [/sys/mounts/{path}] +### New Mount [POST] + +Mount a logical backend to a new path. + +Configuration for this new backend is done via the normal +read/write mechanism once it is mounted. + ++ Request (application/json) + + { + "type": "aws", + "description": "EU AWS tokens" + } + ++ Response 204 + +### Unmount [DELETE] + +Unmount a mount point. + ++ Response 204 + +## Remount [/sys/remount] +### Remount [POST] + +Move an already-mounted backend to a new path. + ++ Request (application/json) + + { + "from": "aws", + "to": "aws-east" + } + ++ Response 204 + +# Group Audit Backends + +Audit backends are responsible for shuttling the audit logs that +Vault generates to a durable system for future querying. By default, +audit logs are not stored anywhere. + +## Audit Backends [/sys/audit] +### List Enabled Audit Backends [GET] + +List all the enabled audit backends + ++ Response 200 (application/json) + + { + "file": { + "type": "file", + "description": "Send audit logs to a file", + "options": {} + } + } + +## Single Audit Backend [/sys/audit/{path}] + ++ Parameters + + path (required, string) ... The path where the audit backend is mounted + +### Enable [PUT] + +Enable an audit backend. + ++ Request (application/json) + + { + "type": "file", + "description": "send to a file", + "options": { + "path": "/var/log/vault.audit.log" + } + } + ++ Response 204 + +### Disable [DELETE] + +Disable an audit backend. + ++ Request (application/json) + ++ Response 204 + +# Group Secrets + +## Generic [/{mount}/{path}] + +This group documents the general format of reading and writing +to Vault. 
The exact structure of the keyspace is defined by the +logical backends in use, so documentation related to +a specific backend should be referenced for details on what keys +and routes are expected. + +The path for examples are `/prefix/path`, but in practice +these will be defined by the backends that are mounted. For +example, reading an AWS key might be at the `/aws/root` path. +These paths are defined by the logical backends. + ++ Parameters + + mount (required, string) ... The mount point for the + logical backend. Example: `aws`. + + path (optional, string) ... The path within the backend + to read or write data. + +### Read [GET] + +Read data from vault. + +The data read from the vault can either be a secret or +arbitrary configuration data. The type of data returned +depends on the path, and is defined by the logical backend. + +If the return value is a secret, then the return structure +is a mixture of arbitrary key/value along with the following +fields which are guaranteed to exist: + +- `lease_id` (string) - A unique ID used for renewal and + revocation. + +- `renewable` (bool) - If true, then this key can be renewed. + If a key can't be renewed, then a new key must be requested + after the lease duration period. + +- `lease_duration` (int) - The time in seconds that a secret is + valid for before it must be renewed. + +- `lease_duration_max` (int) - The maximum amount of time in + seconds that a secret is valid for. This will always be + greater than or equal to `lease_duration`. The difference + between this and `lease_duration` is an overlap window + where multiple keys may be valid. + +If the return value is not a secret, then the return structure +is an arbitrary JSON object. + ++ Response 200 (application/json) + + { + "lease_id": "UUID", + "lease_duration": 3600, + "key": "value" + } + +### Write [PUT] + +Write data to vault. + +The behavior and arguments to the write are defined by +the logical backend. + ++ Request (application/json) + + { + "key": "value" + } + ++ Response 204 + +# Group Lease Management + +## Renew Key [/sys/renew/{id}] + ++ Parameters + + id (required, string) ... The `lease_id` of the secret + to renew. + +### Renew [PUT] + ++ Response 200 (application/json) + + { + "lease_id": "...", + "lease_duration": 3600, + "access_key": "foo", + "secret_key": "bar" + } + +## Revoke Key [/sys/revoke/{id}] + ++ Parameters + + id (required, string) ... The `lease_id` of the secret + to revoke. + +### Revoke [PUT] + ++ Response 204 + +# Group Backend: AWS + +## Root Key [/aws/root] +### Set the Key [PUT] + +Set the root key that the logical backend will use to create +new secrets, IAM policies, etc. + ++ Request (application/json) + + { + "access_key": "key", + "secret_key": "key", + "region": "us-east-1" + } + ++ Response 204 + +## Policies [/aws/policies] +### List Policies [GET] + +List all the policies that can be used to create keys. + ++ Response 200 (application/json) + + [{ + "name": "root", + "description": "Root access" + }, { + "name": "web-deploy", + "description": "Enough permissions to deploy the web app." + }] + +## Single Policy [/aws/policies/{name}] + ++ Parameters + + name (required, string) ... Name of the policy. + +### Read [GET] + +Read a policy. + ++ Response 200 (application/json) + + { + "policy": "base64-encoded policy" + } + +### Upsert [PUT] + +Create or update a policy. + ++ Request (application/json) + + { + "policy": "base64-encoded policy" + } + ++ Response 204 + +### Delete [DELETE] + +Delete the policy with the given name. 
+ ++ Response 204 + +## Generate Access Keys [/aws/keys/{policy}] +### Create [GET] + +This generates a new keypair for the given policy. + ++ Parameters + + policy (required, string) ... The policy under which to create + the key pair. + ++ Response 200 (application/json) + + { + "lease_id": "...", + "lease_duration": 3600, + "access_key": "foo", + "secret_key": "bar" + } diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go new file mode 100644 index 000000000..da870c111 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -0,0 +1,11 @@ +package api + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +// Auth is used to return the client for credential-backend API calls. +func (c *Client) Auth() *Auth { + return &Auth{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go new file mode 100644 index 000000000..2dae4df62 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -0,0 +1,178 @@ +package api + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/auth/token/lookup/"+token) + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor/"+accessor) + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self") + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew/"+token) + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err 
:= c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor revokes a token associated with the given accessor +// along with all the child tokens. +func (c *TokenAuth) RevokeAccessor(accessor string) error { + r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor/"+accessor) + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphan(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan/"+token) + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf revokes the token making the call +func (c *TokenAuth) RevokeSelf(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. +func (c *TokenAuth) RevokeTree(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke/"+token) + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// TokenCreateRequest is the options structure for creating a token. +type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go new file mode 100644 index 000000000..70bc5e00e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -0,0 +1,333 @@ +package api + +import ( + "crypto/tls" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const EnvVaultAddress = "VAULT_ADDR" +const EnvVaultCACert = "VAULT_CACERT" +const EnvVaultCAPath = "VAULT_CAPATH" +const EnvVaultClientCert = "VAULT_CLIENT_CERT" +const EnvVaultClientKey = "VAULT_CLIENT_KEY" +const EnvVaultInsecure = "VAULT_SKIP_VERIFY" +const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" +const EnvVaultWrapTTL = "VAULT_WRAP_TTL" + +var ( + errRedirect = errors.New("redirect") +) + +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. +type WrappingLookupFunc func(operation, path string) string + +// Config is used to configure the creation of the client. +type Config struct { + // Address is the address of the Vault server. 
This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // HttpClient is the HTTP client to use, which will currently always have the + // same values as http.DefaultClient. This is used to control redirect behavior. + HttpClient *http.Client + + redirectSetup sync.Once +} + +// DefaultConfig returns a default configuration for the client. It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + + HttpClient: cleanhttp.DefaultClient(), + } + config.HttpClient.Timeout = time.Second * 60 + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if v := os.Getenv(EnvVaultAddress); v != "" { + config.Address = v + } + + return config +} + +// ReadEnvironment reads configuration information from the +// environment. If there is an error, no configuration value +// is updated. +func (c *Config) ReadEnvironment() error { + var envAddress string + var envCACert string + var envCAPath string + var envClientCert string + var envClientKey string + var envInsecure bool + var foundInsecure bool + var envTLSServerName string + + var clientCert tls.Certificate + var foundClientCert bool + var err error + + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvVaultInsecure); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY") + } + foundInsecure = true + } + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + // If we need custom TLS configuration, then set it + if envCACert != "" || envCAPath != "" || envClientCert != "" || envClientKey != "" || envInsecure { + if envClientCert != "" && envClientKey != "" { + clientCert, err = tls.LoadX509KeyPair(envClientCert, envClientKey) + if err != nil { + return err + } + foundClientCert = true + } else if envClientCert != "" || envClientKey != "" { + return fmt.Errorf("Both client cert and client key must be provided") + } + } + + if envAddress != "" { + c.Address = envAddress + } + + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + if foundInsecure { + clientTLSConfig.InsecureSkipVerify = envInsecure + } + + if foundClientCert { + clientTLSConfig.Certificates = []tls.Certificate{clientCert} + } + if envTLSServerName != "" { + clientTLSConfig.ServerName = envTLSServerName + } + + rootConfig := &rootcerts.Config{ + CAFile: envCACert, + CAPath: envCAPath, + } + err = rootcerts.ConfigureTLS(clientTLSConfig, rootConfig) + if err != nil { + return err + } + + return nil +} + +// Client is the client to the Vault API. Create a client with +// NewClient. 
+type Client struct { + addr *url.URL + config *Config + token string + wrappingLookupFunc WrappingLookupFunc +} + +// NewClient returns a new client for the given configuration. +// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. +func NewClient(c *Config) (*Client, error) { + u, err := url.Parse(c.Address) + if err != nil { + return nil, err + } + + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + + redirFunc := func() { + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. http_test actual redirect handling is necessary + c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return errRedirect + } + } + + c.redirectSetup.Do(redirFunc) + + client := &Client{ + addr: u, + config: c, + } + + if token := os.Getenv("VAULT_TOKEN"); token != "" { + client.SetToken(token) + } + + return client, nil +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.wrappingLookupFunc = lookupFunc +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.token = "" +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. +func (c *Client) NewRequest(method, path string) *Request { + req := &Request{ + Method: method, + URL: &url.URL{ + Scheme: c.addr.Scheme, + Host: c.addr.Host, + Path: path, + }, + ClientToken: c.token, + Params: make(map[string][]string), + } + + if c.wrappingLookupFunc != nil { + var lookupPath string + switch { + case strings.HasPrefix(path, "/v1/"): + lookupPath = strings.TrimPrefix(path, "/v1/") + case strings.HasPrefix(path, "v1/"): + lookupPath = strings.TrimPrefix(path, "v1/") + default: + lookupPath = path + } + req.WrapTTL = c.wrappingLookupFunc(method, lookupPath) + } + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +func (c *Client) RawRequest(r *Request) (*Response, error) { + redirectCount := 0 +START: + req, err := r.ToHTTP() + if err != nil { + return nil, err + } + + var result *Response + resp, err := c.config.HttpClient.Do(req) + if resp != nil { + result = &Response{Response: resp} + } + if err != nil { + if urlErr, ok := err.(*url.Error); ok && urlErr.Err == errRedirect { + err = nil + } else if strings.Contains(err.Error(), "tls: oversized") { + err = fmt.Errorf( + "%s\n\n"+ + "This error usually means that the server is running with TLS disabled\n"+ + "but the client is configured to use TLS. 
Please either enable TLS\n"+
+				"on the server or run the client with -address set to an address\n"+
+				"that uses the http protocol:\n\n"+
+				"    vault <command> -address http://<address>\n\n"+
+				"You can also set the VAULT_ADDR environment variable:\n\n\n"+
+				"    VAULT_ADDR=http://<address> vault <command>\n\n"+
+				"where <address>
is replaced by the actual address to the server.", + err) + } + } + if err != nil { + return result, err + } + + // Check for a redirect, only allowing for a single redirect + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto START + } + + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go new file mode 100644 index 000000000..b9ae100bc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -0,0 +1,25 @@ +package api + +import ( + "fmt" +) + +// Help reads the help information for the given path. +func (c *Client) Help(path string) (*Help, error) { + r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) + r.Params.Add("help", "1") + resp, err := c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result Help + err = resp.DecodeJSON(&result) + return &result, err +} + +type Help struct { + Help string `json:"help"` + SeeAlso []string `json:"see_also"` +} diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go new file mode 100644 index 000000000..2e967f445 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -0,0 +1,123 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" +) + +const ( + wrappedResponseLocation = "cubbyhole/response" +) + +// Logical is used to perform logical backend operations on Vault. +type Logical struct { + c *Client +} + +// Logical is used to return the client for logical-backend API calls. 
+func (c *Client) Logical() *Logical { + return &Logical{c: c} +} + +func (c *Logical) Read(path string) (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/"+path) + resp, err := c.c.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) List(path string) (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/"+path) + r.Params.Set("list", "true") + resp, err := c.c.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + if resp.StatusCode == 200 { + return ParseSecret(resp.Body) + } + + return nil, nil +} + +func (c *Logical) Delete(path string) (*Secret, error) { + r := c.c.NewRequest("DELETE", "/v1/"+path) + resp, err := c.c.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + if resp.StatusCode == 200 { + return ParseSecret(resp.Body) + } + + return nil, nil +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + + c.c.SetToken(wrappingToken) + + secret, err := c.Read(wrappedResponseLocation) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation) + } + if secret.Data == nil { + return nil, fmt.Errorf("\"data\" not found in wrapping response") + } + if _, ok := secret.Data["response"]; !ok { + return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") + } + + wrappedSecret := new(Secret) + buf := bytes.NewBufferString(secret.Data["response"].(string)) + dec := json.NewDecoder(buf) + dec.UseNumber() + if err := dec.Decode(wrappedSecret); err != nil { + return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err) + } + + return wrappedSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go new file mode 100644 index 000000000..8f22dd572 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -0,0 +1,71 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Params url.Values + ClientToken string + WrapTTL string + Obj interface{} + Body io.Reader + BodySize int64 +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. 
+func (r *Request) SetJSONBody(val interface{}) error { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(val); err != nil { + return err + } + + r.Obj = val + r.Body = buf + r.BodySize = int64(buf.Len()) + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.Body == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// ToHTTP turns this request into a valid *http.Request for use with the +// net/http package. +func (r *Request) ToHTTP() (*http.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request + req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body) + if err != nil { + return nil, err + } + + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if len(r.ClientToken) != 0 { + req.Header.Set("X-Vault-Token", r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + return req, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go new file mode 100644 index 000000000..a646a0da2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -0,0 +1,75 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes + if r.StatusCode >= 200 && r.StatusCode < 400 { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. + var bodyBuf bytes.Buffer + if _, err := io.Copy(&bodyBuf, r.Body); err != nil { + return err + } + + // Decode the error response if we can. Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) + dec.UseNumber() + if err := dec.Decode(&resp); err != nil { + // Ignore the decoding error and just drop the raw response + return fmt.Errorf( + "Error making API request.\n\n"+ + "URL: %s %s\n"+ + "Code: %d. Raw Message:\n\n%s", + r.Request.Method, r.Request.URL.String(), + r.StatusCode, bodyBuf.String()) + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + "URL: %s %s\n"+ + "Code: %d. Errors:\n\n", + r.Request.Method, r.Request.URL.String(), + r.StatusCode)) + for _, err := range resp.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + + return fmt.Errorf(errBody.String()) +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. 
+type ErrorResponse struct { + Errors []string +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go new file mode 100644 index 000000000..d0a539e47 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -0,0 +1,66 @@ +package api + +import ( + "encoding/json" + "io" + "time" +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. + Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string `json:"token"` + TTL int `json:"ttl"` + CreationTime time.Time `json:"creation_time"` + WrappedAccessor string `json:"wrapped_accessor"` +} + +// SecretAuth is the structure containing auth information if we have it. +type SecretAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + Metadata map[string]string `json:"metadata"` + + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` +} + +// ParseSecret is used to parse a secret value from JSON from an io.Reader. +func ParseSecret(r io.Reader) (*Secret, error) { + // First decode the JSON into a map[string]interface{} + var secret Secret + dec := json.NewDecoder(r) + dec.UseNumber() + if err := dec.Decode(&secret); err != nil { + return nil, err + } + + return &secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go new file mode 100644 index 000000000..7c3e56bb4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -0,0 +1,38 @@ +package api + +import "fmt" + +// SSH is used to return a client to invoke operations on SSH backend. +type SSH struct { + c *Client + MountPoint string +} + +// SSH returns the client for logical-backend API calls. +func (c *Client) SSH() *SSH { + return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHWithMountPoint returns the client with specific SSH mount point. +func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { + return &SSH{ + c: c, + MountPoint: mountPoint, + } +} + +// Credential invokes the SSH backend API to create a credential to establish an SSH session. 
+func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go new file mode 100644 index 000000000..2dd85d14f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -0,0 +1,239 @@ +package api + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. + VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. + Message string `mapstructure:"message"` + + // Username associated with the OTP + Username string `mapstructure:"username"` + + // IP associated with the OTP + IP string `mapstructure:"ip"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. +type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. +func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. + clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. 
+ clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.CACert != "" || c.CAPath != "" || c.TLSSkipVerify { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. +// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. +func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(string(contents)) + if err != nil { + return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object") + } + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "tls_skip_verify", + } + if err := checkHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'") + } + return &c, nil +} + +// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at default path ("ssh"). +func (c *Client) SSHHelper() *SSHHelper { + return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at a specific mount point. +func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { + return &SSHHelper{ + c: c, + MountPoint: mountPoint, + } +} + +// Verify verifies if the key provided by user is present in Vault server. The response +// will contain the IP address and username associated with the OTP. In case the +// OTP matches the echo request message, instead of searching an entry for the OTP, +// an echo response message is returned. This feature is used by ssh-helper to verify if +// its configured correctly. 
+func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + data := map[string]interface{}{ + "otp": otp, + } + verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) + r := c.c.NewRequest("PUT", verifyPath) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + + if secret.Data == nil { + return nil, nil + } + + var verifyResp SSHVerifyResponse + err = mapstructure.Decode(secret.Data, &verifyResp) + if err != nil { + return nil, err + } + return &verifyResp, nil +} + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key '%s' on line %d", key, item.Assign.Line)) + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go new file mode 100644 index 000000000..5fb111887 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys.go @@ -0,0 +1,11 @@ +package api + +// Sys is used to perform system-related operations on Vault. +type Sys struct { + c *Client +} + +// Sys is used to return the client for sys-related API calls. +func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go new file mode 100644 index 000000000..6fbe1ef22 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -0,0 +1,85 @@ +package api + +import ( + "fmt" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + type d struct { + Hash string + } + + var result d + err = resp.DecodeJSON(&result) + return result.Hash, err +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + r := c.c.NewRequest("GET", "/v1/sys/audit") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result map[string]*Audit + err = resp.DecodeJSON(&result) + return result, err +} + +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string) error { + body := map[string]interface{}{ + "type": auditType, + "description": desc, + "options": opts, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// 
Structures for the requests/responses are all down here. They aren't
+// individually documented because they map almost directly to the raw HTTP API
+// documentation. Please refer to that documentation for more details.
+
+type Audit struct {
+	Path        string
+	Type        string
+	Description string
+	Options     map[string]string
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
new file mode 100644
index 000000000..8e1cdec39
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -0,0 +1,56 @@
+package api
+
+import (
+	"fmt"
+)
+
+func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
+	r := c.c.NewRequest("GET", "/v1/sys/auth")
+	resp, err := c.c.RawRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var result map[string]*AuthMount
+	err = resp.DecodeJSON(&result)
+	return result, err
+}
+
+func (c *Sys) EnableAuth(path, authType, desc string) error {
+	body := map[string]string{
+		"type":        authType,
+		"description": desc,
+	}
+
+	r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
+	if err := r.SetJSONBody(body); err != nil {
+		return err
+	}
+
+	resp, err := c.c.RawRequest(r)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+func (c *Sys) DisableAuth(path string) error {
+	r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path))
+	resp, err := c.c.RawRequest(r)
+	if err == nil {
+		defer resp.Body.Close()
+	}
+	return err
+}
+
+// Structures for the requests/responses are all down here. They aren't
+// individually documented because they map almost directly to the raw HTTP API
+// documentation. Please refer to that documentation for more details.
+
+type AuthMount struct {
+	Type        string
+	Description string
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
new file mode 100644
index 000000000..e3034dded
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
@@ -0,0 +1,60 @@
+package api
+
+func (c *Sys) CapabilitiesSelf(path string) ([]string, error) {
+	body := map[string]string{
+		"path": path,
+	}
+
+	r := c.c.NewRequest("POST", "/v1/sys/capabilities-self")
+	if err := r.SetJSONBody(body); err != nil {
+		return nil, err
+	}
+
+	resp, err := c.c.RawRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var result map[string]interface{}
+	err = resp.DecodeJSON(&result)
+	if err != nil {
+		return nil, err
+	}
+	var capabilities []string
+	capabilitiesRaw := result["capabilities"].([]interface{})
+	for _, capability := range capabilitiesRaw {
+		capabilities = append(capabilities, capability.(string))
+	}
+	return capabilities, nil
+}
+
+func (c *Sys) Capabilities(token, path string) ([]string, error) {
+	body := map[string]string{
+		"token": token,
+		"path":  path,
+	}
+
+	r := c.c.NewRequest("POST", "/v1/sys/capabilities")
+	if err := r.SetJSONBody(body); err != nil {
+		return nil, err
+	}
+
+	resp, err := c.c.RawRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var result map[string]interface{}
+	err = resp.DecodeJSON(&result)
+	if err != nil {
+		return nil, err
+	}
+	var capabilities []string
+	capabilitiesRaw := result["capabilities"].([]interface{})
+	for _, capability := range capabilitiesRaw {
+		capabilities = append(capabilities, capability.(string))
+	}
+	return capabilities, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go 
b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go new file mode 100644 index 000000000..8dc2095f3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -0,0 +1,77 @@ +package api + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string + Started bool + Progress int + Required int + Complete bool + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go new file mode 100644 index 000000000..47e971824 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -0,0 +1,51 @@ +package api + +func (c *Sys) InitStatus() (bool, error) { + r := c.c.NewRequest("GET", "/v1/sys/init") + resp, err := c.c.RawRequest(r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string + RecoveryKeys []string `json:"recovery_keys"` + RootToken string `json:"root_token"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go 
b/vendor/github.com/hashicorp/vault/api/sys_leader.go new file mode 100644 index 000000000..201ac732e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -0,0 +1,20 @@ +package api + +func (c *Sys) Leader() (*LeaderResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/leader") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_lease.go new file mode 100644 index 000000000..e103990d4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_lease.go @@ -0,0 +1,45 @@ +package api + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/sys/renew/"+id) + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go new file mode 100644 index 000000000..623a14628 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -0,0 +1,113 @@ +package api + +import ( + "fmt" + + "github.com/fatih/structs" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + r := c.c.NewRequest("GET", "/v1/sys/mounts") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result map[string]*MountOutput + err = resp.DecodeJSON(&result) + return result, err +} + +func (c *Sys) Mount(path string, mountInfo *MountInput) error { + body := structs.Map(mountInfo) + + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) Remount(from, to string) error { + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest("POST", "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + body := structs.Map(config) + r := c.c.NewRequest("POST", 
fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var result MountConfigOutput + err = resp.DecodeJSON(&result) + return &result, err +} + +type MountInput struct { + Type string `json:"type" structs:"type"` + Description string `json:"description" structs:"description"` + Config MountConfigInput `json:"config" structs:"config"` +} + +type MountConfigInput struct { + DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` +} + +type MountOutput struct { + Type string `json:"type" structs:"type"` + Description string `json:"description" structs:"description"` + Config MountConfigOutput `json:"config" structs:"config"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go new file mode 100644 index 000000000..5f013eb5e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -0,0 +1,72 @@ +package api + +import ( + "fmt" +) + +func (c *Sys) ListPolicies() ([]string, error) { + r := c.c.NewRequest("GET", "/v1/sys/policy") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result listPoliciesResp + err = resp.DecodeJSON(&result) + return result.Policies, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name)) + resp, err := c.c.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + + var result getPoliciesResp + err = resp.DecodeJSON(&result) + return result.Rules, err +} + +func (c *Sys) PutPolicy(name, rules string) error { + body := map[string]string{ + "rules": rules, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policy/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name)) + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go new file mode 100644 index 000000000..4fbfbb9fc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -0,0 +1,200 @@ +package api + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/init") + resp, err := c.c.RawRequest(r) + if err != nil 
{ + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyRetrieveResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyRetrieveResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) 
RekeyDeleteRecoveryBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + PGPKeys []string `json:"pgp_keys"` + Backup bool +} + +type RekeyStatusResponse struct { + Nonce string + Started bool + T int + N int + Progress int + Required int + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool +} + +type RekeyUpdateResponse struct { + Nonce string + Complete bool + Keys []string + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool +} + +type RekeyRetrieveResponse struct { + Nonce string + Keys map[string][]string +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go new file mode 100644 index 000000000..22f71d78a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -0,0 +1,30 @@ +package api + +import "time" + +func (c *Sys) Rotate() error { + r := c.c.NewRequest("POST", "/v1/sys/rotate") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + r := c.c.NewRequest("GET", "/v1/sys/key-status") + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + result := new(KeyStatus) + err = resp.DecodeJSON(result) + return result, err +} + +type KeyStatus struct { + Term int + InstallTime time.Time `json:"install_time"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go new file mode 100644 index 000000000..2a5ad4be2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -0,0 +1,56 @@ +package api + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/seal-status") + return sealStatusRequest(c, r) +} + +func (c *Sys) Seal() error { + r := c.c.NewRequest("PUT", "/v1/sys/seal") + resp, err := c.c.RawRequest(r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { + resp, err := c.c.RawRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type SealStatusResponse struct { + Sealed bool + T int + N int + Progress int +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go new file mode 100644 index 000000000..421e5f19f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -0,0 +1,10 @@ +package api + +func (c *Sys) StepDown() error { + r := c.c.NewRequest("PUT", "/v1/sys/step-down") + resp, err := c.c.RawRequest(r) + if err == nil { + defer 
resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore new file mode 100644 index 000000000..e7081ff52 --- /dev/null +++ b/vendor/github.com/magiconair/properties/.gitignore @@ -0,0 +1,6 @@ +*.sublime-project +*.sublime-workspace +*.un~ +*.swp +.idea/ +*.iml diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml new file mode 100644 index 000000000..ab9803902 --- /dev/null +++ b/vendor/github.com/magiconair/properties/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md new file mode 100644 index 000000000..b0d98d81d --- /dev/null +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -0,0 +1,104 @@ +## Changelog + +### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 + + * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces + * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled + Thanks to @mgurov for the fix. + +### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 + + * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically + * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map + +### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 + + * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency + * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) + +### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 + + * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` + * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs + * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy + * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function + +### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 + + * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. + * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. + * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. 
(@pascaldekloe) + +### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 + + * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. + +### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 + + * Vendored in gopkg.in/check.v1 + +### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 + + * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) + +### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 + + * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. + +### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 + + * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) + +### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 + + * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty + * Add clickable links to README + +### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 + + * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with + [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). 
+ +### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 + + * Added support for single and multi-line comments (reading, writing and updating) + * The order of keys is now preserved + * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry + * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method + * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) + +### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 + + * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one + +### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 + + * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string + +### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 + + * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys + * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties + +### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 + +* Added support for time.Duration +* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom) +* Changed default of MustXXX() failure from panic to log.Fatal + +### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 + +* Added MustGet... functions +* Added support for int and uint with range checks on 32 bit platforms + +### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 + +* Renamed from goproperties to properties +* Added support for expansion of environment vars in + filenames and value expressions +* Fixed bug where value expressions were not at the + start of the string + +### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 07 Jan 2014 + +* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE new file mode 100644 index 000000000..7eab43b6b --- /dev/null +++ b/vendor/github.com/magiconair/properties/LICENSE @@ -0,0 +1,25 @@ +goproperties - properties file decoder for Go + +Copyright (c) 2013-2014 - Frank Schroeder + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md new file mode 100644 index 000000000..258fbb2b5 --- /dev/null +++ b/vendor/github.com/magiconair/properties/README.md @@ -0,0 +1,100 @@ +Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties) +======== + +#### Current version: 1.7.4 + +properties is a Go library for reading and writing properties files. + +It supports reading from multiple files or URLs and Spring style recursive +property expansion of expressions like `${key}` to their corresponding value. +Value expressions can refer to other keys like in `${key}` or to environment +variables like in `${USER}`. Filenames can also contain environment variables +like in `/home/${USER}/myapp.properties`. + +Properties can be decoded into structs, maps, arrays and values through +struct tags. + +Comments and the order of keys are preserved. Comments can be modified +and can be written to the output. + +The properties library supports both ISO-8859-1 and UTF-8 encoded data. + +Starting from version 1.3.0 the behavior of the MustXXX() functions is +configurable by providing a custom `ErrorHandler` function. The default has +changed from `panic` to `log.Fatal` but this is configurable and custom +error handling functions can be provided. See the package documentation for +details. 
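+
+For illustration, a minimal sketch of swapping the handler (assuming `fmt` and
+`os` are imported; `ErrorHandler` and `PanicHandler` are the package's own names):
+
+```go
+// Panic on MustXXX() failures instead of calling log.Fatal.
+properties.ErrorHandler = properties.PanicHandler
+
+// Or install a custom handler; it must exit after handling the error.
+properties.ErrorHandler = func(err error) {
+	fmt.Println(err)
+	os.Exit(1)
+}
+```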
+ +Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties) + +Getting Started +--------------- + +```go +import ( + "flag" + "log" + "time" + + "github.com/magiconair/properties" +) + +func main() { + // init from a file + p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) + + // or multiple files + p = properties.MustLoadFiles([]string{ + "${HOME}/config.properties", + "${HOME}/config-${USER}.properties", + }, properties.UTF8, true) + + // or from a map + p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) + + // or from a string + p = properties.MustLoadString("key=value\nabc=def") + + // or from a URL + p = properties.MustLoadURL("http://host/path") + + // or from multiple URLs + p = properties.MustLoadURLs([]string{ + "http://host/config", + "http://host/config-${USER}", + }, true) + + // or from flags + p.MustFlag(flag.CommandLine) + + // get values through getters + host := p.MustGetString("host") + port := p.GetInt("port", 8080) + + // or through Decode + type Config struct { + Host string `properties:"host"` + Port int `properties:"port,default=9000"` + Accept []string `properties:"accept,default=image/png;image;gif"` + Timeout time.Duration `properties:"timeout,default=5s"` + } + var cfg Config + if err := p.Decode(&cfg); err != nil { + log.Fatal(err) + } +} + +``` + +Installation and Upgrade +------------------------ + +``` +$ go get -u github.com/magiconair/properties +``` + +License +------- + +2-clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. + +ToDo +---- +* Dump contents with passwords and secrets obscured diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go new file mode 100644 index 000000000..0a961bb04 --- /dev/null +++ b/vendor/github.com/magiconair/properties/decode.go @@ -0,0 +1,289 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Decode assigns property values to exported fields of a struct. +// +// Decode traverses v recursively and returns an error if a value cannot be +// converted to the field type or a required value is missing for a field. +// +// The following type dependent decodings are used: +// +// String, boolean, numeric fields have the value of the property key assigned. +// The property key name is the name of the field. A different key and a default +// value can be set in the field's tag. Fields without default value are +// required. If the value cannot be converted to the field type an error is +// returned. +// +// time.Duration fields have the result of time.ParseDuration() assigned. +// +// time.Time fields have the value of time.Parse() assigned. The default layout +// is time.RFC3339 but can be set in the field's tag. +// +// Arrays and slices of string, boolean, numeric, time.Duration and time.Time +// fields have the value interpreted as a comma separated list of values. The +// individual values are trimmed of whitespace and empty values are ignored. A +// default value can be provided as a semicolon separated list in the field's +// tag. +// +// Struct fields are decoded recursively using the field name plus "."
as +// prefix. The prefix (without dot) can be overridden in the field's tag. +// Default values are not supported in the field's tag. Specify them on the +// fields of the inner struct instead. +// +// Map fields must have a key of type string and are decoded recursively by +// using the field's name plus "." as prefix and the next element of the key +// name as map key. The prefix (without dot) can be overridden in the field's +// tag. Default values are not supported. +// +// Examples: +// +// // Field is ignored. +// Field int `properties:"-"` +// +// // Field is assigned value of 'Field'. +// Field int +// +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` +// +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` +// +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` +// +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` +// +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct +// +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` +// +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string +// +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string `properties:"myName"` +func (p *Properties) Decode(x interface{}) error { + t, v := reflect.TypeOf(x), reflect.ValueOf(x) + if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { + return fmt.Errorf("not a pointer to struct: %s", t) + } + if err := dec(p, "", nil, nil, v); err != nil { + return err + } + return nil +} + +func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { + t := v.Type() + + // value returns the property value for key or the default if provided. + value := func() (string, error) { + if val, ok := p.Get(key); ok { + return val, nil + } + if def != nil { + return *def, nil + } + return "", fmt.Errorf("missing required key %s", key) + } + + // conv converts a string to a value of the given type.
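+	// For example, conv("5s", reflect.TypeOf(time.Second)) yields a
+	// time.Duration of five seconds, and conv("42", reflect.TypeOf(0))
+	// yields the int 42. time.Time values are parsed with opts["layout"]
+	// if set, falling back to time.RFC3339.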
+ conv := func(s string, t reflect.Type) (val reflect.Value, err error) { + var v interface{} + + switch { + case isDuration(t): + v, err = time.ParseDuration(s) + + case isTime(t): + layout := opts["layout"] + if layout == "" { + layout = time.RFC3339 + } + v, err = time.Parse(layout, s) + + case isBool(t): + v, err = boolVal(s), nil + + case isString(t): + v, err = s, nil + + case isFloat(t): + v, err = strconv.ParseFloat(s, 64) + + case isInt(t): + v, err = strconv.ParseInt(s, 10, 64) + + case isUint(t): + v, err = strconv.ParseUint(s, 10, 64) + + default: + return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) + } + if err != nil { + return reflect.Zero(t), err + } + return reflect.ValueOf(v).Convert(t), nil + } + + // keydef returns the property key and the default value based on the + // name of the struct field and the options in the tag. + keydef := func(f reflect.StructField) (string, *string, map[string]string) { + _key, _opts := parseTag(f.Tag.Get("properties")) + + var _def *string + if d, ok := _opts["default"]; ok { + _def = &d + } + if _key != "" { + return _key, _def, _opts + } + return f.Name, _def, _opts + } + + switch { + case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): + s, err := value() + if err != nil { + return err + } + val, err := conv(s, t) + if err != nil { + return err + } + v.Set(val) + + case isPtr(t): + return dec(p, key, def, opts, v.Elem()) + + case isStruct(t): + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + fk, def, opts := keydef(t.Field(i)) + if !fv.CanSet() { + return fmt.Errorf("cannot set %s", t.Field(i).Name) + } + if fk == "-" { + continue + } + if key != "" { + fk = key + "." + fk + } + if err := dec(p, fk, def, opts, fv); err != nil { + return err + } + } + return nil + + case isArray(t): + val, err := value() + if err != nil { + return err + } + vals := split(val, ";") + a := reflect.MakeSlice(t, 0, len(vals)) + for _, s := range vals { + val, err := conv(s, t.Elem()) + if err != nil { + return err + } + a = reflect.Append(a, val) + } + v.Set(a) + + case isMap(t): + valT := t.Elem() + m := reflect.MakeMap(t) + for postfix := range p.FilterStripPrefix(key + ".").m { + pp := strings.SplitN(postfix, ".", 2) + mk, mv := pp[0], reflect.New(valT) + if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) + } + v.Set(m) + + default: + return fmt.Errorf("unsupported type %s", t) + } + return nil +} + +// split splits a string on sep, trims whitespace of elements +// and omits empty elements +func split(s string, sep string) []string { + var a []string + for _, v := range strings.Split(s, sep) { + if v = strings.TrimSpace(v); v != "" { + a = append(a, v) + } + } + return a +} + +// parseTag parses a "key,k=v,k=v,..." 
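+// For example, the tag "myName,default=15,layout=2006-01-02" is split into
+// the key "myName" and the options {"default": "15", "layout": "2006-01-02"}.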
+func parseTag(tag string) (key string, opts map[string]string) { + opts = map[string]string{} + for i, s := range strings.Split(tag, ",") { + if i == 0 { + key = s + continue + } + + pp := strings.SplitN(s, "=", 2) + if len(pp) == 1 { + opts[pp[0]] = "" + } else { + opts[pp[0]] = pp[1] + } + } + return key, opts +} + +func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } +func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } +func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } +func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } +func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } +func isString(t reflect.Type) bool { return t.Kind() == reflect.String } +func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } +func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } +func isFloat(t reflect.Type) bool { + return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 +} +func isInt(t reflect.Type) bool { + return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 +} +func isUint(t reflect.Type) bool { + return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 +} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go new file mode 100644 index 000000000..36c836808 --- /dev/null +++ b/vendor/github.com/magiconair/properties/doc.go @@ -0,0 +1,156 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package properties provides functions for reading and writing +// ISO-8859-1 and UTF-8 encoded .properties files and has +// support for recursive property expansion. +// +// Java properties files are ISO-8859-1 encoded and use Unicode +// literals for characters outside the ISO character set. Unicode +// literals can be used in UTF-8 encoded properties files but +// aren't necessary. +// +// To load a single properties file use MustLoadFile(): +// +// p := properties.MustLoadFile(filename, properties.UTF8) +// +// To load multiple properties files use MustLoadFiles() +// which loads the files in the given order and merges the +// result. Missing properties files can be ignored if the +// 'ignoreMissing' flag is set to true. +// +// Filenames can contain environment variables which are expanded +// before loading. +// +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// +// All of the different key/value delimiters ' ', ':' and '=' are +// supported as well as the comment characters '!' and '#' and +// multi-line values. +// +// ! this is a comment +// # and so is this +// +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue +// +// Properties stores all comments preceding a key and provides +// GetComments() and SetComments() methods to retrieve and +// update them. The convenience functions GetComment() and +// SetComment() allow access to the last comment. The +// WriteComment() method writes properties files including +// the comments and with the keys in the original order. 
+// This can be used for sanitizing properties files. +// +// Property expansion is recursive and circular references +// and malformed expressions are not allowed and cause an +// error. Expansion of environment variables is supported. +// +// # standard property +// key = value +// +// # property expansion: key2 = value +// key2 = ${key} +// +// # recursive expansion: key3 = value +// key3 = ${key2} +// +// # circular reference (error) +// key = ${key} +// +// # malformed expression (error) +// key = ${ke +// +// # refers to the user's home dir +// home = ${HOME} +// +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} +// +// The default property expansion format is ${key} but can be +// changed by setting different pre- and postfix values on the +// Properties object. +// +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" +// +// Properties provides convenience functions for getting typed +// values with default values if the key does not exist or the +// type conversion failed. +// +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) +// +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) +// +// As an alternative, properties may be applied with the standard +// library's flag implementation at any time. +// +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() +// +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) +// +// Properties provides several MustXXX() convenience functions +// which will terminate the app if an error occurs. The behavior +// of the failure is configurable and the default is to call +// log.Fatal(err). To have the MustXXX() functions panic instead +// of logging the error set a different ErrorHandler before +// you use the Properties package. +// +// properties.ErrorHandler = properties.PanicHandler +// +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") +// +// You can also provide your own ErrorHandler function. The only requirement +// is that the error handler function must exit after handling the error. +// +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } +// +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") +// +// Properties can also be loaded into a struct via the `Decode` +// method, e.g. +// +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } +// +// See `Decode()` method for the full documentation. +// +// The following documents provide a description of the properties +// file format.
+ +// http://en.wikipedia.org/wiki/.properties +// +// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 +// +package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go new file mode 100644 index 000000000..0d775e035 --- /dev/null +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -0,0 +1,34 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import "flag" + +// MustFlag sets flags that are skipped by dst.Parse when p contains +// the respective key for flag.Flag.Name. +// +// Its use is recommended with command line arguments as in: +// flag.Parse() +// p.MustFlag(flag.CommandLine) +func (p *Properties) MustFlag(dst *flag.FlagSet) { + m := make(map[string]*flag.Flag) + dst.VisitAll(func(f *flag.Flag) { + m[f.Name] = f + }) + dst.Visit(func(f *flag.Flag) { + delete(m, f.Name) // overridden + }) + + for name, f := range m { + v, ok := p.Get(name) + if !ok { + continue + } + + if err := f.Value.Set(v); err != nil { + ErrorHandler(err) + } + } +} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go new file mode 100644 index 000000000..c63fcc60d --- /dev/null +++ b/vendor/github.com/magiconair/properties/lex.go @@ -0,0 +1,407 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Parts of the lexer are from the template/text/parser package +// For these parts the following applies: +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of the go 1.2 +// distribution. + +package properties + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos int // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemEOF + itemKey // a key + itemValue // a value + itemComment // a comment +) + +// defines a constant for EOF +const eof = -1 + +// permitted whitespace characters space, FF and TAB +const whitespace = " \f\t" + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + input string // the string being scanned + state stateFn // the next lexing function to enter + pos int // current position in the input + start int // start position of this item + width int // width of last rune read from input + lastPos int // position of most recent item returned by nextItem + runes []rune // scanned runes for this item + items chan item // channel of scanned items +} + +// next returns the next rune in the input.
+func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + i := item{t, l.start, string(l.runes)} + l.items <- i + l.start = l.pos + l.runes = l.runes[:0] +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// appends the rune to the current value +func (l *lexer) appendRune(r rune) { + l.runes = append(l.runes, r) +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +// acceptRunUntil consumes a run of runes up to a terminator. +func (l *lexer) acceptRunUntil(term rune) { + for term != l.next() { + } + l.backup() +} + +// isNotEmpty returns true if the current parsed text is not empty. +func (l *lexer) isNotEmpty() bool { + return l.pos > l.start +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + i := <-l.items + l.lastPos = i.pos + return i +} + +// lex creates a new scanner for the input string. +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + runes: make([]rune, 0, 32), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexBeforeKey(l); l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +// lexBeforeKey scans until a key begins. +func lexBeforeKey(l *lexer) stateFn { + switch r := l.next(); { + case isEOF(r): + l.emit(itemEOF) + return nil + + case isEOL(r): + l.ignore() + return lexBeforeKey + + case isComment(r): + return lexComment + + case isWhitespace(r): + l.ignore() + return lexBeforeKey + + default: + l.backup() + return lexKey + } +} + +// lexComment scans a comment line. The comment character has already been scanned.
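+// For example, given the input "# foo bar\n" the whitespace after the comment
+// character is dropped and an itemComment with the value "foo bar" is emitted.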
+func lexComment(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + for { + switch r := l.next(); { + case isEOF(r): + l.ignore() + l.emit(itemEOF) + return nil + case isEOL(r): + l.emit(itemComment) + return lexBeforeKey + default: + l.appendRune(r) + } + } +} + +// lexKey scans the key up to a delimiter +func lexKey(l *lexer) stateFn { + var r rune + +Loop: + for { + switch r = l.next(); { + + case isEscape(r): + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + + case isEndOfKey(r): + l.backup() + break Loop + + case isEOF(r): + break Loop + + default: + l.appendRune(r) + } + } + + if len(l.runes) > 0 { + l.emit(itemKey) + } + + if isEOF(r) { + l.emit(itemEOF) + return nil + } + + return lexBeforeValue +} + +// lexBeforeValue scans the delimiter between key and value. +// Leading and trailing whitespace is ignored. +// We expect to be just after the key. +func lexBeforeValue(l *lexer) stateFn { + l.acceptRun(whitespace) + l.accept(":=") + l.acceptRun(whitespace) + l.ignore() + return lexValue +} + +// lexValue scans text until the end of the line. We expect to be just after the delimiter. +func lexValue(l *lexer) stateFn { + for { + switch r := l.next(); { + case isEscape(r): + if isEOL(l.peek()) { + l.next() + l.acceptRun(whitespace) + } else { + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + } + + case isEOL(r): + l.emit(itemValue) + l.ignore() + return lexBeforeKey + + case isEOF(r): + l.emit(itemValue) + l.emit(itemEOF) + return nil + + default: + l.appendRune(r) + } + } +} + +// scanEscapeSequence scans either one of the escaped characters +// or a unicode literal. We expect to be after the escape character. +func (l *lexer) scanEscapeSequence() error { + switch r := l.next(); { + + case isEscapedCharacter(r): + l.appendRune(decodeEscapedCharacter(r)) + return nil + + case atUnicodeLiteral(r): + return l.scanUnicodeLiteral() + + case isEOF(r): + return fmt.Errorf("premature EOF") + + // silently drop the escape character and append the rune as is + default: + l.appendRune(r) + return nil + } +} + +// scans a unicode literal in the form \uXXXX. We expect to be after the \u. +func (l *lexer) scanUnicodeLiteral() error { + // scan the digits + d := make([]rune, 4) + for i := 0; i < 4; i++ { + d[i] = l.next() + if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { + return fmt.Errorf("invalid unicode literal") + } + } + + // decode the digits into a rune + r, err := strconv.ParseInt(string(d), 16, 0) + if err != nil { + return err + } + + l.appendRune(rune(r)) + return nil +} + +// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. +func decodeEscapedCharacter(r rune) rune { + switch r { + case 'f': + return '\f' + case 'n': + return '\n' + case 'r': + return '\r' + case 't': + return '\t' + default: + return r + } +} + +// atUnicodeLiteral reports whether we are at a unicode literal. +// The escape character has already been consumed. +func atUnicodeLiteral(r rune) bool { + return r == 'u' +} + +// isComment reports whether we are at the start of a comment. +func isComment(r rune) bool { + return r == '#' || r == '!' +} + +// isEndOfKey reports whether the rune terminates the current key. +func isEndOfKey(r rune) bool { + return strings.ContainsRune(" \f\t\r\n:=", r) +} + +// isEOF reports whether we are at EOF. +func isEOF(r rune) bool { + return r == eof +} + +// isEOL reports whether we are at a new line character. 
+func isEOL(r rune) bool { + return r == '\n' || r == '\r' +} + +// isEscape reports whether the rune is the escape character which +// prefixes unicode literals and other escaped characters. +func isEscape(r rune) bool { + return r == '\\' +} + +// isEscapedCharacter reports whether we are at one of the characters that need escaping. +// The escape character has already been consumed. +func isEscapedCharacter(r rune) bool { + return strings.ContainsRune(" :=fnrt", r) +} + +// isWhitespace reports whether the rune is a whitespace character. +func isWhitespace(r rune) bool { + return strings.ContainsRune(whitespace, r) +} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go new file mode 100644 index 000000000..278cc2ea0 --- /dev/null +++ b/vendor/github.com/magiconair/properties/load.go @@ -0,0 +1,241 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// Encoding specifies encoding of the input data. +type Encoding uint + +const ( + // UTF8 interprets the input data as UTF-8. + UTF8 Encoding = 1 << iota + + // ISO_8859_1 interprets the input data as ISO-8859-1. + ISO_8859_1 +) + +// Load reads a buffer into a Properties struct. +func Load(buf []byte, enc Encoding) (*Properties, error) { + return loadBuf(buf, enc) +} + +// LoadString reads an UTF8 string into a properties struct. +func LoadString(s string) (*Properties, error) { + return loadBuf([]byte(s), UTF8) +} + +// LoadMap creates a new Properties struct from a string map. +func LoadMap(m map[string]string) *Properties { + p := NewProperties() + for k, v := range m { + p.Set(k, v) + } + return p +} + +// LoadFile reads a file into a Properties struct. +func LoadFile(filename string, enc Encoding) (*Properties, error) { + return loadAll([]string{filename}, enc, false) +} + +// LoadFiles reads multiple files in the given order into +// a Properties struct. If 'ignoreMissing' is true then +// non-existent files will not be reported as error. +func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + return loadAll(filenames, enc, ignoreMissing) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func LoadURL(url string) (*Properties, error) { + return loadAll([]string{url}, UTF8, false) +} + +// LoadURLs reads the content of multiple URLs in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code will +// not be reported as error. See LoadURL for the Content-Type header +// and the encoding. +func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { + return loadAll(urls, UTF8, ignoreMissing) +} + +// LoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. 
For the URLs please see +// LoadURL for the Content-Type header and the encoding. +func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + return loadAll(names, enc, ignoreMissing) +} + +// MustLoadString reads an UTF8 string into a Properties struct and +// panics on error. +func MustLoadString(s string) *Properties { + return must(LoadString(s)) +} + +// MustLoadFile reads a file into a Properties struct and +// panics on error. +func MustLoadFile(filename string, enc Encoding) *Properties { + return must(LoadFile(filename, enc)) +} + +// MustLoadFiles reads multiple files in the given order into +// a Properties struct and panics on error. If 'ignoreMissing' +// is true then non-existent files will not be reported as error. +func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadFiles(filenames, enc, ignoreMissing)) +} + +// MustLoadURL reads the content of a URL into a Properties struct and +// panics on error. +func MustLoadURL(url string) *Properties { + return must(LoadURL(url)) +} + +// MustLoadURLs reads the content of multiple URLs in the given order into a +// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 +// status code will not be reported as error. +func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { + return must(LoadURLs(urls, ignoreMissing)) +} + +// MustLoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. It panics on error. +func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadAll(names, enc, ignoreMissing)) +} + +func loadBuf(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + return p, p.check() +} + +func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + result := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + var p *Properties + if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") { + p, err = loadURL(n, ignoreMissing) + } else { + p, err = loadFile(n, enc, ignoreMissing) + } + if err != nil { + return nil, err + } + result.Merge(p) + + } + return result, result.check() +} + +func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if ignoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + p, err := parse(convert(data, enc)) + if err != nil { + return nil, err + } + return p, nil +} + +func loadURL(url string, ignoreMissing bool) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + if resp.StatusCode == 404 && ignoreMissing { + LogPrintf("properties: %s returned %d. 
skipping", url, resp.StatusCode) + return NewProperties(), nil + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + if err = resp.Body.Close(); err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + + ct := resp.Header.Get("Content-Type") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + enc = ISO_8859_1 + case "", "text/plain; charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + p, err := parse(convert(body, enc)) + if err != nil { + return nil, err + } + return p, nil +} + +func must(p *Properties, err error) *Properties { + if err != nil { + ErrorHandler(err) + } + return p +} + +// expandName expands ${ENV_VAR} expressions in a name. +// If the environment variable does not exist then it will be replaced +// with an empty string. Malformed expressions like "${ENV_VAR" will +// be reported as error. +func expandName(name string) (string, error) { + return expand(name, make(map[string]bool), "${", "}", make(map[string]string)) +} + +// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. +// For ISO-8859-1 we can convert each byte straight into a rune since the +// first 256 unicode code points cover ISO-8859-1. +func convert(buf []byte, enc Encoding) string { + switch enc { + case UTF8: + return string(buf) + case ISO_8859_1: + runes := make([]rune, len(buf)) + for i, b := range buf { + runes[i] = rune(b) + } + return string(runes) + default: + ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) + } + panic("ErrorHandler should exit") +} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go new file mode 100644 index 000000000..90f555cb9 --- /dev/null +++ b/vendor/github.com/magiconair/properties/parser.go @@ -0,0 +1,95 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "runtime" +) + +type parser struct { + lex *lexer +} + +func parse(input string) (properties *Properties, err error) { + p := &parser{lex: lex(input)} + defer p.recover(&err) + + properties = NewProperties() + key := "" + comments := []string{} + + for { + token := p.expectOneOf(itemComment, itemKey, itemEOF) + switch token.typ { + case itemEOF: + goto done + case itemComment: + comments = append(comments, token.val) + continue + case itemKey: + key = token.val + if _, ok := properties.m[key]; !ok { + properties.k = append(properties.k, key) + } + } + + token = p.expectOneOf(itemValue, itemEOF) + if len(comments) > 0 { + properties.c[key] = comments + comments = []string{} + } + switch token.typ { + case itemEOF: + properties.m[key] = "" + goto done + case itemValue: + properties.m[key] = token.val + } + } + +done: + return properties, nil +} + +func (p *parser) errorf(format string, args ...interface{}) { + format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) expect(expected itemType) (token item) { + token = p.lex.nextItem() + if token.typ != expected { + p.unexpected(token) + } + return token +} + +func (p *parser) expectOneOf(expected ...itemType) (token item) { + token = p.lex.nextItem() + for _, v := range expected { + if token.typ == v { + return token + } + } + p.unexpected(token) + panic("unexpected token") +} + +func (p *parser) unexpected(token item) { + p.errorf(token.String()) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (p *parser) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + *errp = e.(error) + } + return +} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go new file mode 100644 index 000000000..85bb18618 --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties.go @@ -0,0 +1,811 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. +// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. + +import ( + "fmt" + "io" + "log" + "os" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// ErrorHandlerFunc defines the type of function which handles failures +// of the MustXXX() functions. An error handler function must exit +// the application after handling the error. +type ErrorHandlerFunc func(error) + +// ErrorHandler is the function which handles failures of the MustXXX() +// functions. The default is LogFatalHandler. +var ErrorHandler ErrorHandlerFunc = LogFatalHandler + +// LogHandlerFunc defines the function prototype for logging errors. +type LogHandlerFunc func(fmt string, args ...interface{}) + +// LogPrintf defines a log handler which uses log.Printf. +var LogPrintf LogHandlerFunc = log.Printf + +// LogFatalHandler handles the error by logging a fatal error and exiting. +func LogFatalHandler(err error) { + log.Fatal(err) +} + +// PanicHandler handles the error by panicking. 
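+//
+// Example (illustrative): installing PanicHandler makes the MustXXX
+// accessors panic instead of exiting via log.Fatal:
+//
+//	ErrorHandler = PanicHandler
+//	n := MustLoadString("a = 1").MustGetInt("a") // panics on failure instead of exiting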
+func PanicHandler(err error) { + panic(err) +} + +// ----------------------------------------------------------------------------- + +// A Properties contains the key/value pairs from the properties input. +// All values are stored in unexpanded form and are expanded at runtime +type Properties struct { + // Pre-/Postfix for property expansion. + Prefix string + Postfix string + + // DisableExpansion controls the expansion of properties on Get() + // and the check for circular references on Set(). When set to + // true Properties behaves like a simple key/value store and does + // not check for circular references on Get() or on Set(). + DisableExpansion bool + + // Stores the key/value pairs + m map[string]string + + // Stores the comments per key. + c map[string][]string + + // Stores the keys in order of appearance. + k []string +} + +// NewProperties creates a new Properties struct with the default +// configuration for "${key}" expressions. +func NewProperties() *Properties { + return &Properties{ + Prefix: "${", + Postfix: "}", + m: map[string]string{}, + c: map[string][]string{}, + k: []string{}, + } +} + +// Get returns the expanded value for the given key if exists. +// Otherwise, ok is false. +func (p *Properties) Get(key string) (value string, ok bool) { + v, ok := p.m[key] + if p.DisableExpansion { + return v, ok + } + if !ok { + return "", false + } + + expanded, err := p.expand(v) + + // we guarantee that the expanded value is free of + // circular references and malformed expressions + // so we panic if we still get an error here. + if err != nil { + ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + } + + return expanded, true +} + +// MustGet returns the expanded value for the given key if exists. +// Otherwise, it panics. +func (p *Properties) MustGet(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// ClearComments removes the comments for all keys. +func (p *Properties) ClearComments() { + p.c = map[string][]string{} +} + +// ---------------------------------------------------------------------------- + +// GetComment returns the last comment before the given key or an empty string. +func (p *Properties) GetComment(key string) string { + comments, ok := p.c[key] + if !ok || len(comments) == 0 { + return "" + } + return comments[len(comments)-1] +} + +// ---------------------------------------------------------------------------- + +// GetComments returns all comments that appeared before the given key or nil. +func (p *Properties) GetComments(key string) []string { + if comments, ok := p.c[key]; ok { + return comments + } + return nil +} + +// ---------------------------------------------------------------------------- + +// SetComment sets the comment for the key. +func (p *Properties) SetComment(key, comment string) { + p.c[key] = []string{comment} +} + +// ---------------------------------------------------------------------------- + +// SetComments sets the comments for the key. If the comments are nil then +// all comments for this key are deleted. +func (p *Properties) SetComments(key string, comments []string) { + if comments == nil { + delete(p.c, key) + return + } + p.c[key] = comments +} + +// ---------------------------------------------------------------------------- + +// GetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. 
The comparison is case-insensitive. +// If the key does not exist the default value is returned. +func (p *Properties) GetBool(key string, def bool) bool { + v, err := p.getBool(key) + if err != nil { + return def + } + return v +} + +// MustGetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the function panics. +func (p *Properties) MustGetBool(key string) bool { + v, err := p.getBool(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getBool(key string) (value bool, err error) { + if v, ok := p.Get(key); ok { + return boolVal(v), nil + } + return false, invalidKeyError(key) +} + +func boolVal(v string) bool { + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +// ---------------------------------------------------------------------------- + +// GetDuration parses the expanded value as an time.Duration (in ns) if the +// key exists. If key does not exist or the value cannot be parsed the default +// value is returned. In almost all cases you want to use GetParsedDuration(). +func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { + v, err := p.getInt64(key) + if err != nil { + return def + } + return time.Duration(v) +} + +// MustGetDuration parses the expanded value as an time.Duration (in ns) if +// the key exists. If key does not exist or the value cannot be parsed the +// function panics. In almost all cases you want to use MustGetParsedDuration(). +func (p *Properties) MustGetDuration(key string) time.Duration { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return time.Duration(v) +} + +// ---------------------------------------------------------------------------- + +// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { + s, ok := p.Get(key) + if !ok { + return def + } + v, err := time.ParseDuration(s) + if err != nil { + return def + } + return v +} + +// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetParsedDuration(key string) time.Duration { + s, ok := p.Get(key) + if !ok { + ErrorHandler(invalidKeyError(key)) + } + v, err := time.ParseDuration(s) + if err != nil { + ErrorHandler(err) + } + return v +} + +// ---------------------------------------------------------------------------- + +// GetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetFloat64(key string, def float64) float64 { + v, err := p.getFloat64(key) + if err != nil { + return def + } + return v +} + +// MustGetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics. 
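+//
+// Example (illustrative) for the typed accessors above:
+//
+//	p := MustLoadString("timeout = 30s\nverbose = yes\n")
+//	p.GetBool("verbose", false)                 // true: "yes" is accepted
+//	p.GetParsedDuration("timeout", time.Second) // 30s, via time.ParseDuration
+//	p.GetDuration("timeout", time.Second)       // 1s: "30s" is not a plain integer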
+func (p *Properties) MustGetFloat64(key string) float64 { + v, err := p.getFloat64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getFloat64(key string) (value float64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetInt(key string, def int) int { + v, err := p.getInt64(key) + if err != nil { + return def + } + return intRangeCheck(key, v) +} + +// MustGetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetInt(key string) int { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return intRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetInt64(key string, def int64) int64 { + v, err := p.getInt64(key) + if err != nil { + return def + } + return v +} + +// MustGetInt64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetInt64(key string) int64 { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getInt64(key string) (value int64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetUint parses the expanded value as an uint if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetUint(key string, def uint) uint { + v, err := p.getUint64(key) + if err != nil { + return def + } + return uintRangeCheck(key, v) +} + +// MustGetUint parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetUint(key string) uint { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return uintRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetUint64 parses the expanded value as an uint64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetUint64(key string, def uint64) uint64 { + v, err := p.getUint64(key) + if err != nil { + return def + } + return v +} + +// MustGetUint64 parses the expanded value as an int if the key exists. 
+// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetUint64(key string) uint64 { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getUint64(key string) (value uint64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetString returns the expanded value for the given key if exists or +// the default value otherwise. +func (p *Properties) GetString(key, def string) string { + if v, ok := p.Get(key); ok { + return v + } + return def +} + +// MustGetString returns the expanded value for the given key if exists or +// panics otherwise. +func (p *Properties) MustGetString(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// Filter returns a new properties object which contains all properties +// for which the key matches the pattern. +func (p *Properties) Filter(pattern string) (*Properties, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return p.FilterRegexp(re), nil +} + +// FilterRegexp returns a new properties object which contains all properties +// for which the key matches the regular expression. +func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { + pp := NewProperties() + for _, k := range p.k { + if re.MatchString(k) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterPrefix returns a new properties object with a subset of all keys +// with the given prefix. +func (p *Properties) FilterPrefix(prefix string) *Properties { + pp := NewProperties() + for _, k := range p.k { + if strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterStripPrefix returns a new properties object with a subset of all keys +// with the given prefix and the prefix removed from the keys. +func (p *Properties) FilterStripPrefix(prefix string) *Properties { + pp := NewProperties() + n := len(prefix) + for _, k := range p.k { + if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference + // TODO(fs): this function should probably return an error but the signature is fixed + pp.Set(k[n:], p.m[k]) + } + } + return pp +} + +// Len returns the number of keys. +func (p *Properties) Len() int { + return len(p.m) +} + +// Keys returns all keys in the same order as in the input. +func (p *Properties) Keys() []string { + keys := make([]string, len(p.k)) + copy(keys, p.k) + return keys +} + +// Set sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. 
If the value contains a +// circular reference or a malformed expression then +// an error is returned. +// An empty key is silently ignored. +func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { + if key == "" { + return "", false, nil + } + + // if expansion is disabled we allow circular references + if p.DisableExpansion { + prev, ok = p.Get(key) + p.m[key] = value + if !ok { + p.k = append(p.k, key) + } + return prev, ok, nil + } + + // to check for a circular reference we temporarily need + // to set the new value. If there is an error then revert + // to the previous state. Only if all tests are successful + // then we add the key to the p.k list. + prev, ok = p.Get(key) + p.m[key] = value + + // now check for a circular reference + _, err = p.expand(value) + if err != nil { + + // revert to the previous state + if ok { + p.m[key] = prev + } else { + delete(p.m, key) + } + + return "", false, err + } + + if !ok { + p.k = append(p.k, key) + } + + return prev, ok, nil +} + +// SetValue sets property key to the default string value +// as defined by fmt.Sprintf("%v"). +func (p *Properties) SetValue(key string, value interface{}) error { + _, _, err := p.Set(key, fmt.Sprintf("%v", value)) + return err +} + +// MustSet sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. An empty key is silently ignored. +func (p *Properties) MustSet(key, value string) (prev string, ok bool) { + prev, ok, err := p.Set(key, value) + if err != nil { + ErrorHandler(err) + } + return prev, ok +} + +// String returns a string of all expanded 'key = value' pairs. +func (p *Properties) String() string { + var s string + for _, key := range p.k { + value, _ := p.Get(key) + s = fmt.Sprintf("%s%s = %s\n", s, key, value) + } + return s +} + +// Write writes all unexpanded 'key = value' pairs to the given writer. +// Write returns the number of bytes written and any write error encountered. +func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { + return p.WriteComment(w, "", enc) +} + +// WriteComment writes all unexpanced 'key = value' pairs to the given writer. +// If prefix is not empty then comments are written with a blank line and the +// given prefix. The prefix should be either "# " or "! " to be compatible with +// the properties file format. Otherwise, the properties parser will not be +// able to read the file back in. It returns the number of bytes written and +// any write error encountered. +func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { + var x int + + for _, key := range p.k { + value := p.m[key] + + if prefix != "" { + if comments, ok := p.c[key]; ok { + // don't print comments if they are all empty + allEmpty := true + for _, c := range comments { + if c != "" { + allEmpty = false + break + } + } + + if !allEmpty { + // add a blank line between entries but not at the top + if len(comments) > 0 && n > 0 { + x, err = fmt.Fprintln(w) + if err != nil { + return + } + n += x + } + + for _, c := range comments { + x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + if err != nil { + return + } + n += x + } + } + } + } + + x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + if err != nil { + return + } + n += x + } + return +} + +// Map returns a copy of the properties as a map. 
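+//
+// Example (illustrative) for Set and the circular-reference check above:
+//
+//	p := NewProperties()
+//	p.MustSet("a", "A")
+//	p.MustSet("b", "${a}")          // ok: p.MustGetString("b") == "A"
+//	_, _, err := p.Set("c", "${c}") // error: circular reference; "c" is reverted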
+func (p *Properties) Map() map[string]string { + m := make(map[string]string) + for k, v := range p.m { + m[k] = v + } + return m +} + +// FilterFunc returns a copy of the properties which includes the values which passed all filters. +func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { + pp := NewProperties() +outer: + for k, v := range p.m { + for _, f := range filters { + if !f(k, v) { + continue outer + } + pp.Set(k, v) + } + } + return pp +} + +// ---------------------------------------------------------------------------- + +// Delete removes the key and its comments. +func (p *Properties) Delete(key string) { + delete(p.m, key) + delete(p.c, key) + newKeys := []string{} + for _, k := range p.k { + if k != key { + newKeys = append(newKeys, k) + } + } + p.k = newKeys +} + +// Merge merges properties, comments and keys from other *Properties into p +func (p *Properties) Merge(other *Properties) { + for k, v := range other.m { + p.m[k] = v + } + for k, v := range other.c { + p.c[k] = v + } + +outer: + for _, otherKey := range other.k { + for _, key := range p.k { + if otherKey == key { + continue outer + } + } + p.k = append(p.k, otherKey) + } +} + +// ---------------------------------------------------------------------------- + +// check expands all values and returns an error if a circular reference or +// a malformed expression was found. +func (p *Properties) check() error { + for _, value := range p.m { + if _, err := p.expand(value); err != nil { + return err + } + } + return nil +} + +func (p *Properties) expand(input string) (string, error) { + // no pre/postfix -> nothing to expand + if p.Prefix == "" && p.Postfix == "" { + return input, nil + } + + return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m) +} + +// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. +// The function keeps track of the keys that were already expanded and stops if it +// detects a circular reference or a malformed expression of the form '(prefix)key'. +func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } + + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } + + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] + + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + + if _, ok := keys[key]; ok { + return "", fmt.Errorf("circular reference") + } + + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + + // remember that we've seen the key + keys[key] = true + + return expand(s[:start]+val+s[end+1:], keys, prefix, postfix, values) +} + +// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
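+//
+// Example (illustrative) of the expansion rules implemented by expand above:
+//
+//	p := MustLoadString("name = world\ngreeting = hello ${name}\n")
+//	p.MustGetString("greeting") // "hello world"
+//	// Keys missing from the map fall back to the environment:
+//	// ${HOME} resolves via os.Getenv("HOME").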
+func encode(s string, special string, enc Encoding) string { + switch enc { + case UTF8: + return encodeUtf8(s, special) + case ISO_8859_1: + return encodeIso(s, special) + default: + panic(fmt.Sprintf("unsupported encoding %v", enc)) + } +} + +func encodeUtf8(s string, special string) string { + v := "" + for pos := 0; pos < len(s); { + r, w := utf8.DecodeRuneInString(s[pos:]) + pos += w + v += escape(r, special) + } + return v +} + +func encodeIso(s string, special string) string { + var r rune + var w int + var v string + for pos := 0; pos < len(s); { + switch r, w = utf8.DecodeRuneInString(s[pos:]); { + case r < 1<<8: // single byte rune -> escape special chars only + v += escape(r, special) + case r < 1<<16: // two byte rune -> unicode literal + v += fmt.Sprintf("\\u%04x", r) + default: // more than two bytes per rune -> can't encode + v += "?" + } + pos += w + } + return v +} + +func escape(r rune, special string) string { + switch r { + case '\f': + return "\\f" + case '\n': + return "\\n" + case '\r': + return "\\r" + case '\t': + return "\\t" + default: + if strings.ContainsRune(special, r) { + return "\\" + string(r) + } + return string(r) + } +} + +func invalidKeyError(key string) error { + return fmt.Errorf("unknown property: %s", key) +} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go new file mode 100644 index 000000000..2e907d540 --- /dev/null +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -0,0 +1,31 @@ +// Copyright 2017 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "math" +) + +// make this a var to overwrite it in a test +var is32Bit = ^uint(0) == math.MaxUint32 + +// intRangeCheck checks if the value fits into the int type and +// panics if it does not. +func intRangeCheck(key string, v int64) int { + if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return int(v) +} + +// uintRangeCheck checks if the value fits into the uint type and +// panics if it does not. +func uintRangeCheck(key string, v uint64) uint { + if is32Bit && v > math.MaxUint32 { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return uint(v) +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 000000000..d70706d5b --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 000000000..8996b0221 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,137 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. 
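+//
+// Example (illustrative; the path is hypothetical):
+//
+//	home, _ := Dir()                // e.g. "/home/alice"
+//	cfg, _ := Expand("~/.app/conf") // filepath.Join(home, ".app/conf")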
+func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If "getent" is missing, ignore it + if err == exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..7f3fe9a96 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..659d6885f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..aa91f76ce --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,151 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
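+//
+// Example (illustrative): hooks are normally installed via DecoderConfig
+// rather than invoked directly. The "out" struct is hypothetical:
+//
+//	var out struct {
+//		Tags    []string
+//		Timeout time.Duration
+//	}
+//	config := &DecoderConfig{
+//		Result: &out,
+//		DecodeHook: ComposeDecodeHookFunc(
+//			StringToSliceHookFunc(","),     // "a,b" -> []string{"a", "b"}
+//			StringToTimeDurationHookFunc(), // "5s"  -> 5 * time.Second
+//		),
+//	}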
+func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = reflect.ValueOf(data).Type() + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. 
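+//
+// Example (illustrative): a failed decode can be unpacked field by field:
+//
+//	var out struct{ Age int }
+//	err := Decode(map[string]interface{}{"age": "x"}, &out)
+//	if merr, ok := err.(*Error); ok {
+//		for _, msg := range merr.Errors {
+//			log.Println(msg) // e.g. "'Age' expected type 'int', got unconvertible type 'string'"
+//		}
+//	}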
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..a367a95b6 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,767 @@ +// The mapstructure package exposes functionality to convert an +// abitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. 
+ ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. 
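+//
+// Example (illustrative), decoding the document shown in the README:
+//
+//	var person struct {
+//		Type string
+//		Name string
+//	}
+//	md := &Metadata{}
+//	d, err := NewDecoder(&DecoderConfig{Result: &person, Metadata: md})
+//	if err == nil {
+//		err = d.Decode(map[string]interface{}{"type": "person", "name": "Mitchell"})
+//	}
+//	// person.Name == "Mitchell"; md.Keys lists the decoded fields.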
+func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. + var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. 
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + 
name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... 
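+		// (Illustrative: []map[string]interface{}{{"a": 1}, {"b": 2}} decodes
+		// into map[string]int{"a": 1, "b": 2} when WeaklyTypedInput is set.)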
+ if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. + valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. 
+ if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + if fieldType.Anonymous { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + } + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. 
+ if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/pascaldekloe/goe/verify/values.go b/vendor/github.com/pascaldekloe/goe/verify/values.go new file mode 100644 index 000000000..e214033c3 --- /dev/null +++ b/vendor/github.com/pascaldekloe/goe/verify/values.go @@ -0,0 +1,168 @@ +package verify + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +// Values verifies that got has all the content, and only the content, defined by want. +func Values(t *testing.T, name string, got, want interface{}) (ok bool) { + tr := travel{} + tr.values(reflect.ValueOf(got), reflect.ValueOf(want), nil) + + fail := tr.report(name) + if fail != "" { + t.Error(fail) + return false + } + + return true +} + +func (t *travel) values(got, want reflect.Value, path []*segment) { + if !want.IsValid() { + if got.IsValid() { + t.differ(path, "Unwanted %s", got.Type()) + } + return + } + if !got.IsValid() { + t.differ(path, "Missing %s", want.Type()) + return + } + + if got.Type() != want.Type() { + t.differ(path, "Got type %s, want %s", got.Type(), want.Type()) + return + } + + switch got.Kind() { + + case reflect.Struct: + seg := &segment{format: "/%s"} + path = append(path, seg) + for i, n := 0, got.NumField(); i < n; i++ { + seg.x = got.Type().Field(i).Name + t.values(got.Field(i), want.Field(i), path) + } + path = path[:len(path)-1] + + case reflect.Slice, reflect.Array: + n := got.Len() + if n != want.Len() { + t.differ(path, "Got %d elements, want %d", n, want.Len()) + return + } + + seg := &segment{format: "[%d]"} + path = append(path, seg) + for i := 0; i < n; i++ { + seg.x = i + t.values(got.Index(i), want.Index(i), path) + } + path = path[:len(path)-1] + + case reflect.Ptr, reflect.Interface: + t.values(got.Elem(), want.Elem(), path) + + case reflect.Map: + seg := &segment{} + path = append(path, seg) + for _, key := range want.MapKeys() { + applyKeySeg(seg, key) + t.values(got.MapIndex(key), want.MapIndex(key), path) + } + + for _, key := range got.MapKeys() { + v := want.MapIndex(key) + if v.IsValid() { + continue + } + applyKeySeg(seg, key) + t.values(got.MapIndex(key), v, path) + } + path = path[:len(path)-1] + + case reflect.Func: + t.differ(path, "Can't compare functions") + + default: + a, b := asInterface(got), asInterface(want) + if a != b { + 
t.differ(path, differMsg(a, b))
+		}
+
+	}
+}
+
+func applyKeySeg(dst *segment, key reflect.Value) {
+	if key.Kind() == reflect.String {
+		dst.format = "[%q]"
+	} else {
+		dst.format = "[%v]"
+	}
+	dst.x = asInterface(key)
+}
+
+func asInterface(v reflect.Value) interface{} {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return v.Uint()
+	case reflect.Float32, reflect.Float64:
+		return v.Float()
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex()
+	case reflect.String:
+		return v.String()
+	default:
+		return v.Interface()
+	}
+}
+
+func differMsg(got, want interface{}) string {
+	switch got.(type) {
+	case int64:
+		g, w := got.(int64), want.(int64)
+		if g < 0xA && g > -0xA && w < 0xA && w > -0xA {
+			return fmt.Sprintf("Got %d, want %d", got, want)
+		}
+		return fmt.Sprintf("Got %d (0x%x), want %d (0x%x)", got, got, want, want)
+	case uint64:
+		g, w := got.(uint64), want.(uint64)
+		if g < 0xA && w < 0xA {
+			return fmt.Sprintf("Got %d, want %d", got, want)
+		}
+		return fmt.Sprintf("Got %d (0x%x), want %d (0x%x)", got, got, want, want)
+	case float64, complex128:
+		return fmt.Sprintf("Got %f (%e), want %f (%e)", got, got, want, want)
+	case string:
+		a, b := got.(string), want.(string)
+		if len(a) > len(b) {
+			a, b = b, a
+		}
+		r := strings.NewReader(b)
+
+		var differAt int
+		for i, c := range a {
+			o, _, _ := r.ReadRune()
+			if c != o {
+				differAt = i
+				break
+			}
+		}
+
+		format := "Got %q, want %q"
+		if differAt > 0 {
+			format += fmt.Sprintf("\n %s^", strings.Repeat(" ", differAt))
+		}
+		return fmt.Sprintf(format, got, want)
+	default:
+		return fmt.Sprintf("Got %v, want %v", got, want)
+	}
+}
diff --git a/vendor/github.com/pascaldekloe/goe/verify/verify.go b/vendor/github.com/pascaldekloe/goe/verify/verify.go
new file mode 100644
index 000000000..d98c014fc
--- /dev/null
+++ b/vendor/github.com/pascaldekloe/goe/verify/verify.go
@@ -0,0 +1,73 @@
+// Package verify offers convenience routines for content verification.
+package verify
+
+import (
+	"bytes"
+	"fmt"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// travel is the verification state.
+type travel struct {
+	diffs []differ
+}
+
+// differ is a verification failure.
+type differ struct {
+	// path is the expression to the content.
+	path string
+	// msg has a reason.
+	msg string
+}
+
+// segment is a differ.path component used for lazy formatting.
+type segment struct {
+	format string
+	x      interface{}
+}
+
+func (t *travel) differ(path []*segment, msg string, args ...interface{}) {
+	var buf bytes.Buffer
+	for _, s := range path {
+		buf.WriteString(fmt.Sprintf(s.format, s.x))
+	}
+
+	t.diffs = append(t.diffs, differ{
+		msg:  fmt.Sprintf(msg, args...),
+		path: buf.String(),
+	})
+}
+
+func (t *travel) report(name string) string {
+	if len(t.diffs) == 0 {
+		return ""
+	}
+
+	var buf bytes.Buffer
+
+	buf.WriteString("verification for ")
+	buf.WriteString(name)
+	if _, file, lineno, ok := runtime.Caller(2); ok {
+		fmt.Fprintf(&buf, " at %s:%d", path.Base(file), lineno)
+	}
+	buf.WriteByte(':')
+
+	for _, d := range t.diffs {
+		buf.WriteByte('\n')
+		if d.path != "" {
+			buf.WriteString(d.path)
+			buf.WriteString(": ")
+		}
+		lines := strings.Split(d.msg, "\n")
+		buf.WriteString(lines[0])
+		for _, l := range lines[1:] {
+			buf.WriteByte('\n')
+			buf.WriteString(strings.Repeat(" ", len(d.path)+2))
+			buf.WriteString(l)
+		}
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/pubnub/go-metrics-statsd/LICENSE b/vendor/github.com/pubnub/go-metrics-statsd/LICENSE
new file mode 100644
index 000000000..78635365d
--- /dev/null
+++ b/vendor/github.com/pubnub/go-metrics-statsd/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 PubNub
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pubnub/go-metrics-statsd/README.md b/vendor/github.com/pubnub/go-metrics-statsd/README.md
new file mode 100644
index 000000000..a6cc5f866
--- /dev/null
+++ b/vendor/github.com/pubnub/go-metrics-statsd/README.md
@@ -0,0 +1,18 @@
+## Intro
+This library is based on the cyberdelia [graphite library](https://github.com/cyberdelia/go-metrics-graphite), modified to emit the StatsD wire format.
+
+This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics)
+library which posts the metrics to StatsD. It was originally part of the
+`go-metrics` library itself, but has been split off to make maintenance of
+both the core library and the client easier.
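+
+The `addr` argument in the usage snippet below must be a resolved UDP
+address. A minimal, self-contained sketch of a full invocation (the server
+address, flush interval, and prefix here are illustrative assumptions, not
+defaults):
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+	"time"
+
+	statsd "github.com/pubnub/go-metrics-statsd"
+	metrics "github.com/rcrowley/go-metrics"
+)
+
+func main() {
+	// 8125 is the conventional StatsD port; adjust for your setup.
+	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:8125")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// StatsD blocks, flushing every second under the given prefix.
+	statsd.StatsD(metrics.DefaultRegistry, 1*time.Second, "some.prefix", addr)
+}
+```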
+
+### Usage
+
+```go
+import "github.com/pubnub/go-metrics-statsd"
+
+
+go statsd.StatsD(metrics.DefaultRegistry,
+    1*time.Second, "some.prefix", addr)
+```
+
diff --git a/vendor/github.com/pubnub/go-metrics-statsd/statsd.go b/vendor/github.com/pubnub/go-metrics-statsd/statsd.go
new file mode 100644
index 000000000..27b8ea7e6
--- /dev/null
+++ b/vendor/github.com/pubnub/go-metrics-statsd/statsd.go
@@ -0,0 +1,117 @@
+package statsd
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// StatsDConfig provides a container with
+// configuration parameters for the StatsD exporter
+type StatsDConfig struct {
+	Addr          *net.UDPAddr     // Network address to connect to
+	Registry      metrics.Registry // Registry to be exported
+	FlushInterval time.Duration    // Flush interval
+	DurationUnit  time.Duration    // Time conversion unit for durations
+	Prefix        string           // Prefix to be prepended to metric names
+	Percentiles   []float64        // Percentiles to export from timers and histograms
+}
+
+// StatsD is a blocking exporter function which reports metrics in r
+// to a statsd server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func StatsD(r metrics.Registry, d time.Duration, prefix string, addr *net.UDPAddr) {
+	StatsDWithConfig(StatsDConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// StatsDWithConfig is a blocking exporter function just like StatsD,
+// but it takes a StatsDConfig instead.
+func StatsDWithConfig(c StatsDConfig) {
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := statsd(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+func statsd(c *StatsDConfig) error {
+	du := float64(c.DurationUnit)
+
+	conn, err := net.DialUDP("udp", nil, c.Addr)
+
+	if nil != err {
+		return err
+	}
+
+	// this will be executed when the statsd func returns
+	defer conn.Close()
+
+	// construct a buffered writer for the statsd wire format
+	w := bufio.NewWriter(conn)
+
+	// format each metric in the registry into the statsd wire format and send it
+	c.Registry.Each(func(name string, metric interface{}) {
+		switch m := metric.(type) {
+		case metrics.Counter:
+			fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, m.Count())
+		case metrics.Gauge:
+			fmt.Fprintf(w, "%s--%s.value:%d|g\n", c.Prefix, name, m.Value())
+		case metrics.GaugeFloat64:
+			fmt.Fprintf(w, "%s--%s.value:%f|g\n", c.Prefix, name, m.Value())
+		case metrics.Histogram:
+			h := m.Snapshot()
+			ps := h.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, h.Count())
+			fmt.Fprintf(w, "%s--%s.min:%d|g\n", c.Prefix, name, h.Min())
+			fmt.Fprintf(w, "%s--%s.max:%d|g\n", c.Prefix, name, h.Max())
+			fmt.Fprintf(w, "%s--%s.mean:%.2f|g\n", c.Prefix, name, h.Mean())
+			fmt.Fprintf(w, "%s--%s.std-dev:%.2f|g\n", c.Prefix, name, h.StdDev())
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s--%s.%s-percentile:%.2f|g\n", c.Prefix, name, key, ps[psIdx])
+			}
+		case metrics.Meter:
+			ss := m.Snapshot()
+			fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, ss.Count())
+			fmt.Fprintf(w, "%s--%s.one-minute:%.2f|g\n", c.Prefix, name, ss.Rate1())
+			fmt.Fprintf(w, "%s--%s.five-minute:%.2f|g\n", c.Prefix, name, ss.Rate5())
+			fmt.Fprintf(w, "%s--%s.fifteen-minute:%.2f|g\n", c.Prefix, name, ss.Rate15())
+			fmt.Fprintf(w, 
"%s--%s.mean:%.2f|g\n", c.Prefix, name, ss.RateMean()) + case metrics.Timer: + t := m.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s--%s.count:%d|c\n", c.Prefix, name, t.Count()) + fmt.Fprintf(w, "%s--%s.min:%d|g\n", c.Prefix, name, t.Min()/int64(du)) + fmt.Fprintf(w, "%s--%s.max:%d|g\n", c.Prefix, name, t.Max()/int64(du)) + fmt.Fprintf(w, "%s--%s.mean:%.2f|g\n", c.Prefix, name, t.Mean()/du) + fmt.Fprintf(w, "%s--%s.std-dev:%.2f|g\n", c.Prefix, name, t.StdDev()/du) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s--%s.%s-percentile:%.2f|g\n", c.Prefix, name, key, ps[psIdx]/du) + } + fmt.Fprintf(w, "%s--%s.one-minute:%.2f|g\n", c.Prefix, name, t.Rate1()) + fmt.Fprintf(w, "%s--%s.five-minute:%.2f|g\n", c.Prefix, name, t.Rate5()) + fmt.Fprintf(w, "%s--%s.fifteen-minute:%.2f|g\n", c.Prefix, name, t.Rate15()) + fmt.Fprintf(w, "%s--%s.mean-rate:%.2f|g\n", c.Prefix, name, t.RateMean()) + default: + log.Println("[WARN] No Metric", c.Prefix, name, reflect.TypeOf(m)) + } + w.Flush() + }) + + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore new file mode 100644 index 000000000..83c8f8237 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.gitignore @@ -0,0 +1,9 @@ +*.[68] +*.a +*.out +*.swp +_obj +_testmain.go +cmd/metrics-bench/metrics-bench +cmd/metrics-example/metrics-example +cmd/never-read/never-read diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml new file mode 100644 index 000000000..117763e65 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + +script: + - ./validate.sh + +# this should give us faster builds according to +# http://docs.travis-ci.com/user/migrating-from-legacy/ +sudo: false diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE new file mode 100644 index 000000000..363fa9ee7 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. 
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 000000000..b7356b5fc
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,168 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := NewRegistry()
+g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not threadsafe. For threadsafe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+	10e9,
+	"127.0.0.1:8086",
+	"database-name",
+	"username",
+	"password"
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+ +```go +import "github.com/mihasya/go-metrics-librato" + +go librato.Librato(metrics.DefaultRegistry, + 10e9, // interval + "example@example.com", // account owner email address + "token", // Librato API token + "hostname", // source + []float64{0.95}, // percentiles to send + time.Millisecond, // time unit +) +``` + +Periodically emit every metric to StatHat: + +```go +import "github.com/rcrowley/go-metrics/stathat" + +go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") +``` + +Maintain all metrics along with expvars at `/debug/metrics`: + +This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) +but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars +as well as all your go-metrics. + + +```go +import "github.com/rcrowley/go-metrics/exp" + +exp.Exp(metrics.DefaultRegistry) +``` + +Installation +------------ + +```sh +go get github.com/rcrowley/go-metrics +``` + +StatHat support additionally requires their Go client: + +```sh +go get github.com/stathat/go +``` + +Publishing Metrics +------------------ + +Clients are available for the following destinations: + +* Librato - https://github.com/mihasya/go-metrics-librato +* Graphite - https://github.com/cyberdelia/go-metrics-graphite +* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb +* Ganglia - https://github.com/appscode/metlia +* Prometheus - https://github.com/deathowl/go-metrics-prometheus +* DataDog - https://github.com/syntaqx/go-metrics-datadog +* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx +* Honeycomb - https://github.com/getspine/go-metrics-honeycomb +* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go new file mode 100644 index 000000000..bb7b039cb --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/counter.go @@ -0,0 +1,112 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// GetOrRegisterCounter returns an existing Counter or constructs and registers +// a new StandardCounter. +func GetOrRegisterCounter(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounter).(Counter) +} + +// NewCounter constructs a new StandardCounter. +func NewCounter() Counter { + if UseNilMetrics { + return NilCounter{} + } + return &StandardCounter{0} +} + +// NewRegisteredCounter constructs and registers a new StandardCounter. +func NewRegisteredCounter(name string, r Registry) Counter { + c := NewCounter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterSnapshot is a read-only copy of another Counter. +type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. +func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// NilCounter is a no-op Counter. 
+type NilCounter struct{} + +// Clear is a no-op. +func (NilCounter) Clear() {} + +// Count is a no-op. +func (NilCounter) Count() int64 { return 0 } + +// Dec is a no-op. +func (NilCounter) Dec(i int64) {} + +// Inc is a no-op. +func (NilCounter) Inc(i int64) {} + +// Snapshot is a no-op. +func (NilCounter) Snapshot() Counter { return NilCounter{} } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. +func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go new file mode 100644 index 000000000..043ccefab --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -0,0 +1,76 @@ +package metrics + +import ( + "runtime/debug" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. +// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. +func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. 
+func RegisterDebugGCStats(r Registry) { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. +func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go new file mode 100644 index 000000000..a8183dd7e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -0,0 +1,138 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. +type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. +func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. +func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate uint64 + init uint32 + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. 
+func (a *StandardEWMA) Rate() float64 {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+			// but again, this section is only invoked on the first successful Tick() operation.
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
+	instantRate := float64(count) / float64(5e9)
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 000000000..cb57a9388
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Snapshot() Gauge
+	Update(int64)
+	Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 000000000..3962e6db0
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// the sync/atomic package to manage a single float64 value, stored as its
+// IEEE 754 bit pattern in a uint64.
+type StandardGaugeFloat64 struct {
+	value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+	panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 000000000..abd0a7d29
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+	Percentiles   []float64     // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. +func GraphiteOnce(c GraphiteConfig) error { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go 
b/vendor/github.com/rcrowley/go-metrics/healthcheck.go new file mode 100644 index 000000000..445131cae --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. +type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. +func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go new file mode 100644 index 000000000..dbc837fe4 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/histogram.go @@ -0,0 +1,202 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. +type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Sum() int64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. 
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample at the time the snapshot was taken. +func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. +func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilHistogram) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. 
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. +func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample. +func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go new file mode 100644 index 000000000..174b9477e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -0,0 +1,31 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. +func (r *StandardRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(r.GetAll()) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. +func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.GetAll()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go new file mode 100644 index 000000000..f8074c045 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -0,0 +1,80 @@ +package metrics + +import ( + "time" +) + +type Logger interface { + Printf(format string, v ...interface{}) +} + +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + +// Output each metric in the given registry periodically using the given +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. 
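+//
+// A minimal usage sketch (editor's addition, not part of the upstream
+// source); the interval and logger are illustrative only:
+//
+//	go LogScaled(DefaultRegistry, 60*time.Second, time.Millisecond,
+//		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))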
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range time.Tick(freq) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md new file mode 100644 index 000000000..47454f54b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/memory.md @@ -0,0 +1,285 @@ +Memory usage +============ + +(Highly unscientific.) 
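+
+The figures below come from scraping `/proc`. A rough in-process alternative
+(an editor's sketch, *not* how these numbers were gathered) is to diff
+`runtime.MemStats` around metric registration:
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// heapInUse reads the current heap-in-use bytes. Note that GC noise makes
+// single readings unreliable; average several runs.
+func heapInUse() uint64 {
+	var ms runtime.MemStats
+	runtime.ReadMemStats(&ms)
+	return ms.HeapInuse
+}
+
+func main() {
+	before := heapInUse()
+	// ... register metrics here ...
+	fmt.Printf("delta: %d kB\n", (int64(heapInUse())-int64(before))/1024)
+}
+```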
+ +Command used to gather static memory usage: + +```sh +grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" +``` + +Program used to gather baseline memory usage: + +```go +package main + +import "time" + +func main() { + time.Sleep(600e9) +} +``` + +Baseline +-------- + +``` +VmPeak: 42604 kB +VmSize: 42604 kB +VmLck: 0 kB +VmHWM: 1120 kB +VmRSS: 1120 kB +VmData: 35460 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 36 kB +VmSwap: 0 kB +``` + +Program used to gather metric memory usage (with other metrics being similar): + +```go +package main + +import ( + "fmt" + "metrics" + "time" +) + +func main() { + fmt.Sprintf("foo") + metrics.NewRegistry() + time.Sleep(600e9) +} +``` + +1000 counters registered +------------------------ + +``` +VmPeak: 44016 kB +VmSize: 44016 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.412 kB virtual, TODO 0.808 kB resident per counter.** + +100000 counters registered +-------------------------- + +``` +VmPeak: 55024 kB +VmSize: 55024 kB +VmLck: 0 kB +VmHWM: 12440 kB +VmRSS: 12440 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1024 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**0.1242 kB virtual, 0.1132 kB resident per counter.** + +1000 gauges registered +---------------------- + +``` +VmPeak: 44012 kB +VmSize: 44012 kB +VmLck: 0 kB +VmHWM: 1928 kB +VmRSS: 1928 kB +VmData: 36868 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 40 kB +VmSwap: 0 kB +``` + +**1.408 kB virtual, 0.808 kB resident per counter.** + +100000 gauges registered +------------------------ + +``` +VmPeak: 55020 kB +VmSize: 55020 kB +VmLck: 0 kB +VmHWM: 12432 kB +VmRSS: 12432 kB +VmData: 47876 kB +VmStk: 136 kB +VmExe: 1020 kB +VmLib: 1848 kB +VmPTE: 60 kB +VmSwap: 0 kB +``` + +**0.12416 kB virtual, 0.11312 resident per gauge.** + +1000 histograms with a uniform sample size of 1028 +-------------------------------------------------- + +``` +VmPeak: 72272 kB +VmSize: 72272 kB +VmLck: 0 kB +VmHWM: 16204 kB +VmRSS: 16204 kB +VmData: 65100 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 80 kB +VmSwap: 0 kB +``` + +**29.668 kB virtual, TODO 15.084 resident per histogram.** + +10000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 256912 kB +VmSize: 256912 kB +VmLck: 0 kB +VmHWM: 146204 kB +VmRSS: 146204 kB +VmData: 249740 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 448 kB +VmSwap: 0 kB +``` + +**21.4308 kB virtual, 14.5084 kB resident per histogram.** + +50000 histograms with a uniform sample size of 1028 +--------------------------------------------------- + +``` +VmPeak: 908112 kB +VmSize: 908112 kB +VmLck: 0 kB +VmHWM: 645832 kB +VmRSS: 645588 kB +VmData: 900940 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 1716 kB +VmSwap: 1544 kB +``` + +**17.31016 kB virtual, 12.88936 kB resident per histogram.** + +1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 +------------------------------------------------------------------------------------- + +``` +VmPeak: 62480 kB +VmSize: 62480 kB +VmLck: 0 kB +VmHWM: 11572 kB +VmRSS: 11572 kB +VmData: 55308 kB +VmStk: 136 kB +VmExe: 1048 kB +VmLib: 1848 kB +VmPTE: 64 kB +VmSwap: 0 kB +``` + +**19.876 kB virtual, 10.452 kB resident per histogram.** + +10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 000000000..7807406a3
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,257 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	// Only used on stop.
+	lock        sync.Mutex
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter. Once a meter has been stopped, Mark is a no-op.
+func (m *StandardMeter) Stop() {
+	m.lock.Lock()
+	stopped := m.stopped
+	m.stopped = 1
+	m.lock.Unlock()
+	if stopped != 1 {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
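+//
+// For illustration (editor's sketch, not upstream code), the mean rate is
+// simply the total count divided by the seconds elapsed since the meter
+// started:
+//
+//	m := NewMeter()
+//	defer m.Stop()
+//	m.Mark(100)
+//	time.Sleep(2 * time.Second)
+//	_ = m.RateMean() // roughly 50 events/sec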
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set for future stopping.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 000000000..b97a49ed1
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 000000000..266b6c93d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
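+//
+// A usage sketch (editor's addition); the address and prefix are
+// illustrative only:
+//
+//	addr, _ := net.ResolveTCPAddr("tcp", "opentsdb.example.com:4242")
+//	go OpenTSDB(DefaultRegistry, 10*time.Second, "myapp", addr)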
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + OpenTSDBWithConfig(OpenTSDBConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + }) +} + +// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, +// but it takes a OpenTSDBConfig instead. +func OpenTSDBWithConfig(c OpenTSDBConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := openTSDB(&c); nil != err { + log.Println(err) + } + } +} + +func getShortHostname() string { + if shortHostName == "" { + host, _ := os.Hostname() + if index := strings.Index(host, "."); index > 0 { + shortHostName = host[:index] + } else { + shortHostName = host + } + } + return shortHostName +} + +func openTSDB(c *OpenTSDBConfig) error { + shortHostname := getShortHostname() + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case Gauge: + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case GaugeFloat64: + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), 
shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go new file mode 100644 index 000000000..b3bab64e1 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -0,0 +1,363 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) + + // Unregister all metrics. (Mostly for testing.) + UnregisterAll() +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.RWMutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. 
+func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + // access the read lock first which should be re-entrant + r.mutex.RLock() + metric, ok := r.metrics[name] + r.mutex.RUnlock() + if ok { + return metric + } + + // only take the write lock if we'll be modifying the metrics map + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.RLock() + defer r.mutex.RUnlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.stop(name) + delete(r.metrics, name) +} + +// Unregister all metrics. (Mostly for testing.) 
+func (r *StandardRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name, _ := range r.metrics { + r.stop(name) + delete(r.metrics, name) + } +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + metrics := make(map[string]interface{}, len(r.metrics)) + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} + +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. +type Stoppable interface { + Stop() +} + +type PrefixedRegistry struct { + underlying Registry + prefix string +} + +func NewPrefixedRegistry(prefix string) Registry { + return &PrefixedRegistry{ + underlying: NewRegistry(), + prefix: prefix, + } +} + +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + +// Call the given function for each registered metric. +func (r *PrefixedRegistry) Each(fn func(string, interface{})) { + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" +} + +// Get the metric by the given name or nil if none is registered. +func (r *PrefixedRegistry) Get(name string) interface{} { + realName := r.prefix + name + return r.underlying.Get(realName) +} + +// Gets an existing metric or registers the given one. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { + realName := r.prefix + name + return r.underlying.GetOrRegister(realName, metric) +} + +// Register the given metric under the given name. The name will be prefixed. +func (r *PrefixedRegistry) Register(name string, metric interface{}) error { + realName := r.prefix + name + return r.underlying.Register(realName, metric) +} + +// Run all registered healthchecks. +func (r *PrefixedRegistry) RunHealthchecks() { + r.underlying.RunHealthchecks() +} + +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + +// Unregister the metric with the given name. The name will be prefixed. +func (r *PrefixedRegistry) Unregister(name string) { + realName := r.prefix + name + r.underlying.Unregister(realName) +} + +// Unregister all metrics. (Mostly for testing.) +func (r *PrefixedRegistry) UnregisterAll() { + r.underlying.UnregisterAll() +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. 
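+//
+// The package-level helpers below operate on DefaultRegistry; e.g.
+// (editor's sketch, names illustrative):
+//
+//	c := GetOrRegister("requests", NewCounter()).(Counter)
+//	c.Inc(1)
+//	Each(func(name string, i interface{}) { fmt.Println(name, i) })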
+func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Register the given metric under the given name. Panics if a metric by the +// given name is already registered. +func MustRegister(name string, i interface{}) { + if err := Register(name, i); err != nil { + panic(err) + } +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. +func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go new file mode 100644 index 000000000..11c6b785a --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -0,0 +1,212 @@ +package metrics + +import ( + "runtime" + "runtime/pprof" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + NumThread Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. +func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. 
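+	// Editor's note: ReadMemStats stops the world while it runs; the
+	// ReadMemStats timer updated on the next line makes that cost itself
+	// observable.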
+ runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
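+//
+// Typical wiring (editor's sketch): register the gauges once, then sample
+// them from a background goroutine:
+//
+//	RegisterRuntimeMemStats(DefaultRegistry)
+//	go CaptureRuntimeMemStats(DefaultRegistry, 5*time.Second)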
+func RegisterRuntimeMemStats(r Registry) { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + 
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go new file mode 100644 index 000000000..e3391f4e8 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go @@ -0,0 +1,10 @@ +// +build cgo +// +build !appengine + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 000000000..ca12c05ba --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go new file mode 100644 index 000000000..616a3b475 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo appengine + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 000000000..be96aa6f1 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go new file mode 100644 index 000000000..fecee5ef6 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -0,0 +1,616 @@ +package metrics + +import ( + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values *expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. +func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: newExpDecaySampleHeap(reservoirSize), + } + s.t1 = s.t0.Add(rescaleThreshold) + return s +} + +// Clear clears all samples. 
+func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values.Clear() +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. +func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.values.Size() +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. +func (s *ExpDecaySample) update(t time.Time, v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if s.values.Size() == s.reservoirSize { + s.values.Pop() + } + s.values.Push(expDecaySample{ + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + v: v, + }) + if t.After(s.t1) { + values := s.values.Values() + t0 := s.t0 + s.values.Clear() + s.t0 = t + s.t1 = s.t0.Add(rescaleThreshold) + for _, v := range values { + v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) + s.values.Push(v) + } + } +} + +// NilSample is a no-op Sample. +type NilSample struct{} + +// Clear is a no-op. +func (NilSample) Clear() {} + +// Count is a no-op. +func (NilSample) Count() int64 { return 0 } + +// Max is a no-op. +func (NilSample) Max() int64 { return 0 } + +// Mean is a no-op. 
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// NewSampleSnapshot constructs a read-only snapshot from a count and a slice
+// of values.
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
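+//
+// Worked example (editor's addition): with sorted values [1 2 3 4] and
+// p = 0.5, the interpolation in SamplePercentiles computes
+// pos = 0.5*(4+1) = 2.5, then blends values[1] = 2 and values[2] = 3 to
+// give 2 + 0.5*(3-2) = 2.5.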
+func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. +func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. +func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. 
+func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + r := rand.Int63n(s.count) + if r < int64(len(s.values)) { + s.values[int(r)] = v + } + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. +type expDecaySample struct { + k float64 + v int64 +} + +func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { + return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. 
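UniformSample above is a classic reservoir sample: until the reservoir fills it stores every value, and afterwards each new value replaces a uniformly chosen slot with probability reservoirSize/count, so the retained values remain a uniform sample of everything observed. A short usage sketch against the API defined above (the import alias and the metric values are our own):

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewUniformSample(100) // reservoir of 100 slots
	for i := int64(1); i <= 1000; i++ {
		s.Update(i)
	}

	snap := s.Snapshot() // read-only copy, safe to query repeatedly
	fmt.Println(snap.Count(), snap.Size())           // 1000 100
	fmt.Println(snap.Min(), snap.Max(), snap.Mean()) // statistics over the retained values
	fmt.Println(snap.Percentile(0.95))
}
```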
+// The internal implementation is copied from the standard library's container/heap +type expDecaySampleHeap struct { + s []expDecaySample +} + +func (h *expDecaySampleHeap) Clear() { + h.s = h.s[:0] +} + +func (h *expDecaySampleHeap) Push(s expDecaySample) { + n := len(h.s) + h.s = h.s[0 : n+1] + h.s[n] = s + h.up(n) +} + +func (h *expDecaySampleHeap) Pop() expDecaySample { + n := len(h.s) - 1 + h.s[0], h.s[n] = h.s[n], h.s[0] + h.down(0, n) + + n = len(h.s) + s := h.s[n-1] + h.s = h.s[0 : n-1] + return s +} + +func (h *expDecaySampleHeap) Size() int { + return len(h.s) +} + +func (h *expDecaySampleHeap) Values() []expDecaySample { + return h.s +} + +func (h *expDecaySampleHeap) up(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + j = i + } +} + +func (h *expDecaySampleHeap) down(i, n int) { + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { + j = j2 // = 2*i + 2 // right child + } + if !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + i = j + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go new file mode 100644 index 000000000..693f19085 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// Output each metric in the given registry to syslog periodically using +// the given syslogger. 
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+			case Gauge:
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+			case GaugeFloat64:
+				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+			case Healthcheck:
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+					name,
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+				))
+			case Meter:
+				m := metric.Snapshot()
+				w.Info(fmt.Sprintf(
+					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+					name,
+					m.Count(),
+					m.Rate1(),
+					m.Rate5(),
+					m.Rate15(),
+					m.RateMean(),
+				))
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+					name,
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
+				))
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 000000000..d6ec4c626
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Timer
+	StdDev() float64
+	Stop()
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+	c := NewTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. +type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. +func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. +func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Stop is a no-op. +func (NilTimer) Stop() {} + +// Sum is a no-op. +func (NilTimer) Sum() int64 { return 0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. +func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. 
+func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + +// Sum returns the sum in the sample. +func (t *StandardTimer) Sum() int64 { + return t.histogram.Sum() +} + +// Record the duration of the execution of the given function. +func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. +func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Stop is a no-op. 
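StandardTimer pairs a Histogram (for the duration distribution) with a Meter (for event rates), and Snapshot captures both under the mutex so the two stay consistent. A usage sketch of the API defined above (the metric name and import alias are illustrative):

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// A nil registry means the package's DefaultRegistry.
	t := metrics.NewRegisteredTimer("request.latency", nil)
	defer t.Stop() // releases the underlying meter

	t.Time(func() { time.Sleep(10 * time.Millisecond) }) // time a function call

	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	t.UpdateSince(start) // or record an interval that ends now

	snap := t.Snapshot() // histogram and meter captured together
	fmt.Println(snap.Count(), time.Duration(snap.Max()), snap.RateMean())
}
```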
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100644
index 000000000..c4ae91e64
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 000000000..091e971d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
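Write above ranges over time.Tick and never returns, so callers normally start it in its own goroutine; WriteOnce, whose body follows, performs a single sorted dump. A sketch (NewRegisteredCounter is assumed from the package's counter.go, which is not part of this excerpt):

```go
package main

import (
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Assumed constructor from the same package's counter.go.
	c := metrics.NewRegisteredCounter("requests", nil)
	c.Inc(1)

	// Write blocks forever, dumping the registry once per interval.
	go metrics.Write(metrics.DefaultRegistry, time.Second, os.Stderr)

	time.Sleep(1500 * time.Millisecond) // let one reporting cycle run
}
```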
+func WriteOnce(r Registry, w io.Writer) { + var namedMetrics namedMetricSlice + r.Each(func(name string, i interface{}) { + namedMetrics = append(namedMetrics, namedMetric{name, i}) + }) + + sort.Sort(namedMetrics) + for _, namedMetric := range namedMetrics { + switch metric := namedMetric.m.(type) { + case Counter: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case Gauge: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %9d\n", metric.Value()) + case GaugeFloat64: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) + fmt.Fprintf(w, " error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "histogram %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", h.Count()) + fmt.Fprintf(w, " min: %9d\n", h.Min()) + fmt.Fprintf(w, " max: %9d\n", h.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "meter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", m.Count()) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "timer %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %9d\n", t.Min()) + fmt.Fprintf(w, " max: %9d\n", t.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) + } + } +} + +type namedMetric struct { + name string + m interface{} +} + +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. +type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name +} diff --git a/vendor/github.com/rogpeppe/fastuuid/LICENSE b/vendor/github.com/rogpeppe/fastuuid/LICENSE new file mode 100644 index 000000000..9525fc825 --- /dev/null +++ b/vendor/github.com/rogpeppe/fastuuid/LICENSE @@ -0,0 +1,26 @@ +Copyright © 2014, Roger Peppe +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of this project nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/fastuuid/README.md b/vendor/github.com/rogpeppe/fastuuid/README.md new file mode 100644 index 000000000..6e8f134ae --- /dev/null +++ b/vendor/github.com/rogpeppe/fastuuid/README.md @@ -0,0 +1,50 @@ +# fastuuid +-- + import "github.com/rogpeppe/fastuuid" + +Package fastuuid provides fast UUID generation of 192 bit universally unique +identifiers. It does not provide formatting or parsing of the identifiers (it is +assumed that a simple hexadecimal or base64 representation is sufficient, for +which adequate functionality exists elsewhere). + +Note that the generated UUIDs are not unguessable - each UUID generated from a +Generator is adjacent to the previously generated UUID. + +It ignores RFC 4122. + +## Usage + +#### type Generator + +```go +type Generator struct { +} +``` + +Generator represents a UUID generator that generates UUIDs in sequence from a +random starting point. + +#### func MustNewGenerator + +```go +func MustNewGenerator() *Generator +``` +MustNewGenerator is like NewGenerator but panics on failure. + +#### func NewGenerator + +```go +func NewGenerator() (*Generator, error) +``` +NewGenerator returns a new Generator. It can fail if the crypto/rand read fails. + +#### func (*Generator) Next + +```go +func (g *Generator) Next() [24]byte +``` +Next returns the next UUID from the generator. Only the first 8 bytes can differ +from the previous UUID, so taking a slice of the first 16 bytes is sufficient to +provide a somewhat less secure 128 bit UUID. + +It is OK to call this method concurrently. diff --git a/vendor/github.com/rogpeppe/fastuuid/uuid.go b/vendor/github.com/rogpeppe/fastuuid/uuid.go new file mode 100644 index 000000000..c7cf6b2e4 --- /dev/null +++ b/vendor/github.com/rogpeppe/fastuuid/uuid.go @@ -0,0 +1,66 @@ +// Package fastuuid provides fast UUID generation of 192 bit +// universally unique identifiers. It does not provide +// formatting or parsing of the identifiers (it is assumed +// that a simple hexadecimal or base64 representation +// is sufficient, for which adequate functionality exists elsewhere). +// +// Note that the generated UUIDs are not unguessable - each +// UUID generated from a Generator is adjacent to the +// previously generated UUID. +// +// It ignores RFC 4122. 
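A usage sketch of the API the README above describes; the hex encoding is our own choice, since the package deliberately leaves formatting to the caller:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/rogpeppe/fastuuid"
)

func main() {
	g := fastuuid.MustNewGenerator()

	u := g.Next() // [24]byte; only the first 8 bytes change between calls
	fmt.Println(hex.EncodeToString(u[:]))

	// Per the docs, the first 16 bytes can serve as a somewhat less
	// secure 128-bit identifier.
	fmt.Println(hex.EncodeToString(u[:16]))
}
```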
+package fastuuid + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "sync/atomic" +) + +// Generator represents a UUID generator that +// generates UUIDs in sequence from a random starting +// point. +type Generator struct { + seed [24]byte + counter uint64 +} + +// NewGenerator returns a new Generator. +// It can fail if the crypto/rand read fails. +func NewGenerator() (*Generator, error) { + var g Generator + _, err := rand.Read(g.seed[:]) + if err != nil { + return nil, errors.New("cannot generate random seed: " + err.Error()) + } + return &g, nil +} + +// MustNewGenerator is like NewGenerator +// but panics on failure. +func MustNewGenerator() *Generator { + g, err := NewGenerator() + if err != nil { + panic(err) + } + return g +} + +// Next returns the next UUID from the generator. +// Only the first 8 bytes can differ from the previous +// UUID, so taking a slice of the first 16 bytes +// is sufficient to provide a somewhat less secure 128 bit UUID. +// +// It is OK to call this method concurrently. +func (g *Generator) Next() [24]byte { + x := atomic.AddUint64(&g.counter, 1) + var counterBytes [8]byte + binary.LittleEndian.PutUint64(counterBytes[:], x) + + uuid := g.seed + for i, b := range counterBytes { + uuid[i] ^= b + } + return uuid +} diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 000000000..2d7bb2bf5 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 000000000..369e3d551 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 000000000..937942c2b --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 000000000..59b885168 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1339 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). 
+ commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. 
+ diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. 
+					return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+				}
+			}
+		}
+	}
+		// Walk the reverse path one step.
+		for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 {
+			k2Offset := vOffset + k2
+			var x2 int
+			if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) {
+				x2 = v2[k2Offset+1]
+			} else {
+				x2 = v2[k2Offset-1] + 1
+			}
+			var y2 = x2 - k2
+			for x2 < runes1Len && y2 < runes2Len {
+				if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] {
+					break
+				}
+				x2++
+				y2++
+			}
+			v2[k2Offset] = x2
+			if x2 > runes1Len {
+				// Ran off the left of the graph.
+				k2end += 2
+			} else if y2 > runes2Len {
+				// Ran off the top of the graph.
+				k2start += 2
+			} else if !front {
+				k1Offset := vOffset + delta - k2
+				if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 {
+					x1 := v1[k1Offset]
+					y1 := vOffset + x1 - k1Offset
+					// Mirror x2 onto top-left coordinate system.
+					x2 = runes1Len - x2
+					if x1 >= x2 {
+						// Overlap detected.
+						return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+					}
+				}
+			}
+		}
+	}
+	// Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all.
+	return []Diff{
+		Diff{DiffDelete, string(runes1)},
+		Diff{DiffInsert, string(runes2)},
+	}
+}
+
+func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int,
+	deadline time.Time) []Diff {
+	runes1a := runes1[:x]
+	runes2a := runes2[:y]
+	runes1b := runes1[x:]
+	runes2b := runes2[y:]
+
+	// Compute both diffs serially.
+	diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline)
+	diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline)
+
+	return append(diffs, diffsb...)
+}
+
+// DiffLinesToChars splits two texts into a list of strings, and reduces the texts to a string of hashes where each Unicode character represents one line.
+// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes.
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) {
+	chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2)
+	return string(chars1), string(chars2), lineArray
+}
+
+// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line.
+func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) {
+	// '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character.
+	lineArray := []string{""}    // e.g. lineArray[4] == 'Hello\n'
+	lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4
+
+	chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash)
+	chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash)
+
+	return chars1, chars2, lineArray
+}
+
+func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) {
+	return dmp.DiffLinesToRunes(string(text1), string(text2))
+}
+
+// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line.
+// We use strings instead of []runes as input mainly because you can't use []rune as a map key.
+func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune {
+	// Walk the text, pulling out a substring for each line. text.split('\n') would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect.
+ lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. 
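DiffLinesToChars, the munge helper, and DiffCharsToLines above exist to support the classic line-mode speedup: hash each line to a character, diff the short hash strings, then rehydrate the hashes back into full lines. A sketch of that recipe, assuming the package's New() constructor from diffmatchpatch.go (not shown in this excerpt):

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // constructor defined elsewhere in the package

	a := "alpha\nbeta\ngamma\n"
	b := "alpha\ngamma\ndelta\n"

	// Hash lines to characters, diff the short hash strings, rehydrate.
	c1, c2, lines := dmp.DiffLinesToChars(a, b)
	diffs := dmp.DiffMain(c1, c2, false)
	diffs = dmp.DiffCharsToLines(diffs, lines)

	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```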
+	if text1 == text2 {
+		return textLength
+	}
+
+	// Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/
+	best := 0
+	length := 1
+	for {
+		pattern := text1[textLength-length:]
+		found := strings.Index(text2, pattern)
+		if found == -1 {
+			break
+		}
+		length += found
+		if found == 0 || text1[textLength-length:] == text2[0:length] {
+			best = length
+			length++
+		}
+	}
+
+	return best
+}
+
+// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs.
+func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string {
+	// Unused in this code, but retained for interface compatibility.
+	runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2))
+	if runeSlices == nil {
+		return nil
+	}
+
+	result := make([]string, len(runeSlices))
+	for i, r := range runeSlices {
+		result[i] = string(r)
+	}
+	return result
+}
+
+func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune {
+	if dmp.DiffTimeout <= 0 {
+		// Don't risk returning a non-optimal diff if we have unlimited time.
+		return nil
+	}
+
+	var longtext, shorttext []rune
+	if len(text1) > len(text2) {
+		longtext = text1
+		shorttext = text2
+	} else {
+		longtext = text2
+		shorttext = text1
+	}
+
+	if len(longtext) < 4 || len(shorttext)*2 < len(longtext) {
+		return nil // Pointless.
+	}
+
+	// First check if the second quarter is the seed for a half-match.
+	hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4))
+
+	// Check again based on the third quarter.
+	hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2))
+
+	hm := [][]rune{}
+	if hm1 == nil && hm2 == nil {
+		return nil
+	} else if hm2 == nil {
+		hm = hm1
+	} else if hm1 == nil {
+		hm = hm2
+	} else {
+		// Both matched. Select the longest.
+		if len(hm1[4]) > len(hm2[4]) {
+			hm = hm1
+		} else {
+			hm = hm2
+		}
+	}
+
+	// A half-match was found, sort out the return data.
+	if len(text1) > len(text2) {
+		return hm
+	}
+
+	return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]}
+}
+
+// diffHalfMatchI checks if a substring of shorttext exists within longtext such that the substring is at least half the length of longtext.
+// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or nil if there was no match.
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune {
+	var bestCommonA []rune
+	var bestCommonB []rune
+	var bestCommonLen int
+	var bestLongtextA []rune
+	var bestLongtextB []rune
+	var bestShorttextA []rune
+	var bestShorttextB []rune
+
+	// Start with a 1/4 length substring at position i as a seed.
+ seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + + equalities = &equality{ + data: pointer, + next: equalities, + } + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities.data + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. 
+ pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) + + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. 
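At the API level, the scoring above is internal machinery; the usual flow is simply to run DiffCleanupSemantic over a raw character diff before presenting it, which merges the semantically trivial equalities the comments describe. A sketch under the same New() assumption as before:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	diffs := dmp.DiffMain("the quick brown fox", "the quick red fox", false)
	// Factor human-readable edits out of the character-level result.
	diffs = dmp.DiffCleanupSemantic(diffs)

	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```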
+ return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. 
+
+// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
+	changes := false
+	// Stack of indices where equalities are found.
+	type equality struct {
+		data int
+		next *equality
+	}
+	var equalities *equality
+	// Always equal to equalities[equalitiesLength-1][1]
+	lastequality := ""
+	pointer := 0 // Index of current position.
+	// Is there an insertion operation before the last equality.
+	preIns := false
+	// Is there a deletion operation before the last equality.
+	preDel := false
+	// Is there an insertion operation after the last equality.
+	postIns := false
+	// Is there a deletion operation after the last equality.
+	postDel := false
+	for pointer < len(diffs) {
+		if diffs[pointer].Type == DiffEqual { // Equality found.
+			if len(diffs[pointer].Text) < dmp.DiffEditCost &&
+				(postIns || postDel) {
+				// Candidate found.
+				equalities = &equality{
+					data: pointer,
+					next: equalities,
+				}
+				preIns = postIns
+				preDel = postDel
+				lastequality = diffs[pointer].Text
+			} else {
+				// Not a candidate, and can never become one.
+				equalities = nil
+				lastequality = ""
+			}
+			postIns = false
+			postDel = false
+		} else { // An insertion or deletion.
+			if diffs[pointer].Type == DiffDelete {
+				postDel = true
+			} else {
+				postIns = true
+			}
+
+			// Five types to be split:
+			// <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
+			// <ins>A</ins>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<ins>C</ins>
+			// <ins>A</ins>X<ins>C</ins><del>D</del>
+			// <ins>A</ins><del>B</del>X<del>C</del>
+			var sumPres int
+			if preIns {
+				sumPres++
+			}
+			if preDel {
+				sumPres++
+			}
+			if postIns {
+				sumPres++
+			}
+			if postDel {
+				sumPres++
+			}
+			if len(lastequality) > 0 &&
+				((preIns && preDel && postIns && postDel) ||
+					((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
+
+				insPoint := equalities.data
+
+				// Duplicate record.
+				diffs = append(diffs[:insPoint],
+					append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...)
+
+				// Change second copy to insert.
+				diffs[insPoint+1].Type = DiffInsert
+				// Throw away the equality we just deleted.
+				equalities = equalities.next
+				lastequality = ""
+
+				if preIns && preDel {
+					// No changes made which could affect previous entry, keep going.
+					postIns = true
+					postDel = true
+					equalities = nil
+				} else {
+					if equalities != nil {
+						equalities = equalities.next
+					}
+					if equalities != nil {
+						pointer = equalities.data
+					} else {
+						pointer = -1
+					}
+					postIns = false
+					postDel = false
+				}
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
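+
+// A small illustration (sketch only): adjacent edits of the same type are
+// coalesced by the merge pass below, so two deletions followed by an
+// insertion collapse into one deletion and one insertion:
+//
+//	diffs := []Diff{
+//		{DiffDelete, "ab"},
+//		{DiffDelete, "c"},
+//		{DiffInsert, "x"},
+//	}
+//	diffs = New().DiffCleanupMerge(diffs)
+//	// diffs is now {DiffDelete, "abc"}, {DiffInsert, "x"}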
+
+// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
+// Any edit section can move as long as it doesn't cross an equality.
+func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
+	// Add a dummy entry at the end.
+	diffs = append(diffs, Diff{DiffEqual, ""})
+	pointer := 0
+	countDelete := 0
+	countInsert := 0
+	commonlength := 0
+	textDelete := []rune(nil)
+	textInsert := []rune(nil)
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
+			pointer++
+			break
+		case DiffDelete:
+			countDelete++
+			textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
+			pointer++
+			break
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete+countInsert > 1 {
+				if countDelete != 0 && countInsert != 0 {
+					// Factor out any common prefixes.
+					commonlength = commonPrefixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						x := pointer - countDelete - countInsert
+						if x > 0 && diffs[x-1].Type == DiffEqual {
+							diffs[x-1].Text += string(textInsert[:commonlength])
+						} else {
+							diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
+							pointer++
+						}
+						textInsert = textInsert[commonlength:]
+						textDelete = textDelete[commonlength:]
+					}
+					// Factor out any common suffixes.
+					commonlength = commonSuffixLength(textInsert, textDelete)
+					if commonlength != 0 {
+						insertIndex := len(textInsert) - commonlength
+						deleteIndex := len(textDelete) - commonlength
+						diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
+						textInsert = textInsert[:insertIndex]
+						textDelete = textDelete[:deleteIndex]
+					}
+				}
+				// Delete the offending records and add the merged ones.
+				if countDelete == 0 {
+					diffs = splice(diffs, pointer-countInsert,
+						countDelete+countInsert,
+						Diff{DiffInsert, string(textInsert)})
+				} else if countInsert == 0 {
+					diffs = splice(diffs, pointer-countDelete,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)})
+				} else {
+					diffs = splice(diffs, pointer-countDelete-countInsert,
+						countDelete+countInsert,
+						Diff{DiffDelete, string(textDelete)},
+						Diff{DiffInsert, string(textInsert)})
+				}
+
+				pointer = pointer - countDelete - countInsert + 1
+				if countDelete != 0 {
+					pointer++
+				}
+				if countInsert != 0 {
+					pointer++
+				}
+			} else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
+				// Merge this equality with the previous one.
+				diffs[pointer-1].Text += diffs[pointer].Text
+				diffs = append(diffs[:pointer], diffs[pointer+1:]...)
+			} else {
+				pointer++
+			}
+			countInsert = 0
+			countDelete = 0
+			textDelete = nil
+			textInsert = nil
+			break
+		}
+	}
+
+	if len(diffs[len(diffs)-1].Text) == 0 {
+		diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
+	}
+
+	// Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
+	changes := false
+	pointer = 1
+	// Intentionally ignore the first and last element (don't need checking).
+	for pointer < (len(diffs) - 1) {
+		if diffs[pointer-1].Type == DiffEqual &&
+			diffs[pointer+1].Type == DiffEqual {
+			// This is a single edit surrounded by equalities.
+			if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
+				// Shift the edit over the previous equality.
+				diffs[pointer].Text = diffs[pointer-1].Text +
+					diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
+				diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer-1, 1)
+				changes = true
+			} else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
+				// Shift the edit over the next equality.
+				diffs[pointer-1].Text += diffs[pointer+1].Text
+				diffs[pointer].Text =
+					diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
+				diffs = splice(diffs, pointer+1, 1)
+				changes = true
+			}
+		}
+		pointer++
+	}
+
+	// If shifts were made, the diff needs reordering and another shift sweep.
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
+
+// DiffXIndex returns the equivalent location in s2.
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
+	chars1 := 0
+	chars2 := 0
+	lastChars1 := 0
+	lastChars2 := 0
+	lastDiff := Diff{}
+	for i := 0; i < len(diffs); i++ {
+		aDiff := diffs[i]
+		if aDiff.Type != DiffInsert {
+			// Equality or deletion.
+			chars1 += len(aDiff.Text)
+		}
+		if aDiff.Type != DiffDelete {
+			// Equality or insertion.
+			chars2 += len(aDiff.Text)
+		}
+		if chars1 > loc {
+			// Overshot the location.
+			lastDiff = aDiff
+			break
+		}
+		lastChars1 = chars1
+		lastChars2 = chars2
+	}
+	if lastDiff.Type == DiffDelete {
+		// The location was deleted.
+		return lastChars2
+	}
+	// Add the remaining character length.
+	return lastChars2 + (loc - lastChars1)
+}
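+
+// Sketch (not part of the library): with diffs delete("a") + equal("1234"),
+// location 2 in the source text "a1234" maps to 1 in the destination text
+// "1234", since the leading "a" no longer exists:
+//
+//	diffs := []Diff{{DiffDelete, "a"}, {DiffEqual, "1234"}}
+//	loc2 := New().DiffXIndex(diffs, 2) // == 1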
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += len(aDiff.Text) + case DiffDelete: + deletions += len(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. 
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + // Remember that string slicing is by byte - we want by rune here. + text := string([]rune(text1)[i : i+int(n)]) + i += int(n) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len([]rune(text1)) { + return nil, fmt.Errorf("Delta length (%v) smaller than source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 000000000..d3acc32ce --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. 
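+	// The fields on the returned value can be tuned by the caller; for
+	// example (sketch): dmp := New(); dmp.DiffTimeout = 5 * time.Second
+	// allows more time for diffing large inputs.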
+ return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 000000000..17374e109 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. 
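+				// Shifting rd[j+1] left by one and OR-ing in 1 extends every
+				// candidate match by one character; AND-ing with charMatch
+				// keeps only candidates whose next pattern character matches
+				// text[j-1].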
+				rd[j] = ((rd[j+1] << 1) | 1) & charMatch
+			} else {
+				// Subsequent passes: fuzzy match.
+				rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
+			}
+			if (rd[j] & matchmask) != 0 {
+				score := dmp.matchBitapScore(d, j-1, loc, pattern)
+				// This match will almost certainly be better than any existing match. But check anyway.
+				if score <= scoreThreshold {
+					// Told you so.
+					scoreThreshold = score
+					bestLoc = j - 1
+					if bestLoc > loc {
+						// When passing loc, don't exceed our current distance from loc.
+						start = int(math.Max(1, float64(2*loc-bestLoc)))
+					} else {
+						// Already passed loc, downhill from here on in.
+						break
+					}
+				}
+			}
+		}
+		if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
+			// No hope for a (better) match at greater error levels.
+			break
+		}
+		lastRd = rd
+	}
+	return bestLoc
+}
+
+// matchBitapScore computes and returns the score for a match with e errors and x location.
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+	accuracy := float64(e) / float64(len(pattern))
+	proximity := math.Abs(float64(loc - x))
+	if dmp.MatchDistance == 0 {
+		// Dodge divide by zero error.
+		if proximity == 0 {
+			return accuracy
+		}
+
+		return 1.0
+	}
+	return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+	s := map[byte]int{}
+	charPattern := []byte(pattern)
+	for _, c := range charPattern {
+		_, ok := s[c]
+		if !ok {
+			s[c] = 0
+		}
+	}
+	i := 0
+
+	for _, c := range charPattern {
+		value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+		s[c] = value
+		i++
+	}
+
+	return s
+}
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
new file mode 100644
index 000000000..116c04348
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
@@ -0,0 +1,556 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Patch represents one patch operation.
+type Patch struct {
+	diffs   []Diff
+	start1  int
+	start2  int
+	length1 int
+	length2 int
+}
+
+// String emulates GNU diff's format.
+// Header: @@ -382,8 +481,9 @@
+// Indices are printed as 1-based, not 0-based.
+func (p *Patch) String() string {
+	var coords1, coords2 string
+
+	if p.length1 == 0 {
+		coords1 = strconv.Itoa(p.start1) + ",0"
+	} else if p.length1 == 1 {
+		coords1 = strconv.Itoa(p.start1 + 1)
+	} else {
+		coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1)
+	}
+
+	if p.length2 == 0 {
+		coords2 = strconv.Itoa(p.start2) + ",0"
+	} else if p.length2 == 1 {
+		coords2 = strconv.Itoa(p.start2 + 1)
+	} else {
+		coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2)
+	}
+
+	var text bytes.Buffer
+	_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
+
+	// Escape the body of the patch with %xx notation.
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.start2 : patch.start2+patch.length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.start2-padding) + minEnd := min(len(text), patch.start2+patch.length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.start2-padding):patch.start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.start1 -= len(prefix) + patch.start2 -= len(prefix) + // Extend the lengths. + patch.length1 += len(prefix) + len(suffix) + patch.length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
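+			// Record where the patch starts in both coordinate spaces:
+			// charCount1 indexes the pre-patch text, charCount2 the
+			// post-patch text built up so far.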
+ patch.start1 = charCount1 + patch.start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.length1 += len(aDiff.Text) + patch.length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.start1 = aPatch.start1 + patchCopy.start2 = aPatch.start2 + patchCopy.length1 = aPatch.length1 + patchCopy.length2 = aPatch.length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
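+			// Match on the first MatchMaxBits of the pattern here; the tail
+			// is located separately below and the patch is dropped unless
+			// both ends are found in a consistent order.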
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.length2 - aPatch.length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].start1 += paddingLength + patches[i].start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].start1 -= paddingLength // Should be 0. + patches[0].start2 -= paddingLength // Should be 0. + patches[0].length1 += paddingLength + patches[0].length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].start1 -= extraLength + patches[0].start2 -= extraLength + patches[0].length1 += extraLength + patches[0].length2 += extraLength + } + + // Add some padding on end of last diff. 
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].length1 += paddingLength + patches[last].length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].length1 += extraLength + patches[last].length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + start1 := bigpatch.start1 + start2 := bigpatch.start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.start1 = start1 - len(precontext) + patch.start2 = start2 - len(precontext) + if len(precontext) != 0 { + patch.length1 = len(precontext) + patch.length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.length2 += len(diffText) + start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.length1 += len(diffText) + start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] + + patch.length1 += len(diffText) + start1 += len(diffText) + if diffType == DiffEqual { + patch.length2 += len(diffText) + start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.length1 += len(postcontext) + patch.length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.start1-- + patch.length1 = 1 + } else if m[2] == "0" { + patch.length1 = 0 + } else { + patch.start1-- + patch.length1, _ = strconv.Atoi(m[2]) + } + + patch.start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.start2-- + patch.length2 = 1 + } else if m[4] == "0" { + patch.length2 = 0 + } else { + patch.start2-- + patch.length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 000000000..265f29cc7 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 000000000..190f12234 --- /dev/null +++ b/vendor/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 000000000..53fc52579 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. 
+# + +FROM ubuntu:trusty + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y git-core build-essential wget + +RUN apt-get install -y --no-install-recommends \ + autotools-dev libtool pkg-config zlib1g-dev \ + libcunit1-dev libssl-dev libxml2-dev libevent-dev \ + automake autoconf + +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + +# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: +ENV NGHTTP2_VER 895da9a +RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git + +WORKDIR /root/nghttp2 +RUN git reset --hard $NGHTTP2_VER +RUN autoreconf -i +RUN automake +RUN autoconf +RUN ./configure +RUN make +RUN make install + +WORKDIR /root +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 +RUN ./configure --with-ssl --with-nghttp2=/usr/local +RUN make +RUN make install +RUN ldconfig + +CMD ["-h"] +ENTRYPOINT ["/usr/local/bin/curl"] + diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 000000000..55fd826f7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Makefile @@ -0,0 +1,3 @@ +curlimage: + docker build -t gohttp2/curl . + diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README new file mode 100644 index 000000000..360d5aa37 --- /dev/null +++ b/vendor/golang.org/x/net/http2/README @@ -0,0 +1,20 @@ +This is a work-in-progress HTTP/2 implementation for Go. + +It will eventually live in the Go standard library and won't require +any changes to your code to use. It will just be automatic. + +Status: + +* The server support is pretty good. A few things are missing + but are being worked on. +* The client work has just started but shares a lot of code + is coming along much quicker. + +Docs are at https://godoc.org/golang.org/x/net/http2 + +Demo test server at https://http2.golang.org/ + +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000..b13941258 --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,256 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? 
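+// clientConnPool caches connections per host:port and coalesces concurrent
+// dials to the same address, so simultaneous requests to one host can share
+// a single HTTP/2 connection instead of each opening its own.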
+type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. 
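+// A rough sequence (sketch): the HTTP/1 Transport finishes a TLS handshake
+// that negotiated "h2" and calls addConnIfNeeded; either c is promoted to a
+// new *ClientConn in the pool (used == true), or an existing entry for key
+// wins and the caller is expected to close c (used == false).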
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 000000000..4f720f530 --- /dev/null +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,80 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// convering panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000..20fd7626a --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
+type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). 
+type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go new file mode 100644 index 000000000..47da0f0bf --- /dev/null +++ b/vendor/golang.org/x/net/http2/fixed_buffer.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" +) + +// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. +// It never allocates, but moves old data as new data is written. +type fixedBuffer struct { + buf []byte + r, w int +} + +var ( + errReadEmpty = errors.New("read from empty fixedBuffer") + errWriteFull = errors.New("write on full fixedBuffer") +) + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *fixedBuffer) Read(p []byte) (n int, err error) { + if b.r == b.w { + return 0, errReadEmpty + } + n = copy(p, b.buf[b.r:b.w]) + b.r += n + if b.r == b.w { + b.r = 0 + b.w = 0 + } + return n, nil +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *fixedBuffer) Len() int { + return b.w - b.r +} + +// Write copies bytes from p into the buffer. +// It is an error to write more data than the buffer can hold. +func (b *fixedBuffer) Write(p []byte) (n int, err error) { + // Slide existing data to beginning. + if b.r > 0 && len(p) > len(b.buf)-b.w { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + // Write new data. + n = copy(b.buf[b.w:], p) + b.w += n + if n < len(p) { + err = errWriteFull + } + return n, err +} diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000..957de2542 --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. 
+ conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000..358833fed --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1544 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. 
+const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. 
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to mark this
+ // frame's buffers as being invalid, since the subsequent
+ // frame reuses them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB).
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads, logWrites bool
+
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in in endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if f.logWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ logWrites: logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
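startWrite and endWrite above make every Write* method emit exactly one length-prefixed frame, and ReadFrame below returns exactly one parsed frame that stays valid only until the next read. A self-contained round trip through an in-memory buffer, as an editor's sketch rather than patch content:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // write to and read from the same buffer

	// Each Write* call emits exactly one frame: 9-byte header, then payload.
	if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}); err != nil {
		log.Fatal(err)
	}
	f, err := fr.ReadFrame() // f is valid only until the next ReadFrame
	if err != nil {
		log.Fatal(err)
	}
	sf := f.(*http2.SettingsFrame)
	if v, ok := sf.Value(http2.SettingMaxFrameSize); ok {
		fmt.Println("SETTINGS_MAX_FRAME_SIZE =", v) // 16384
	}
}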
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
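The contract encoded in terminalReadFrameError above is what callers key off: a StreamError condemns a single stream, while anything else condemns the whole connection. A read-loop sketch under that contract; handleFrame and resetStream are hypothetical callbacks, not names from this package:

package main

import "golang.org/x/net/http2"

func readLoop(fr *http2.Framer,
	handleFrame func(http2.Frame) error,
	resetStream func(id uint32, code http2.ErrCode)) error {
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			if se, ok := err.(http2.StreamError); ok {
				resetStream(se.StreamID, se.Code) // e.g. answer with RST_STREAM
				continue                          // the connection is still usable
			}
			return err // ConnectionError or I/O error: tear the connection down
		}
		if err := handleFrame(f); err != nil {
			return err
		}
	}
}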
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+ errPadLength = errors.New("pad length too large")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if len(pad) > 255 {
+ return errPadLength
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ if pad != nil {
+ flags |= FlagDataPadded
+ }
+ f.startWrite(FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
+ f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
+ return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { + f.checkValid() + buf := f.p + for len(buf) > 0 { + settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) + if settingID == s { + return binary.BigEndian.Uint32(buf[2:6]), true + } + buf = buf[6:] + } + return 0, false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + buf := f.p + for len(buf) > 0 { + if err := fn(Setting{ + SettingID(binary.BigEndian.Uint16(buf[:2])), + binary.BigEndian.Uint32(buf[2:6]), + }); err != nil { + return err + } + buf = buf[6:] + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. 
+// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. 
+// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. + Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // Like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID of the stream this
+ // Push Promise reserves.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with Header Frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.PseudoValue accessor
+ // method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
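PseudoValue and the Fields slice are most useful together with Framer.ReadMetaHeaders: once that field is set, ReadFrame merges HEADERS and CONTINUATION frames and decodes HPACK before the caller sees them. A sketch (editor's illustration, not patch content), assuming a Framer that is about to receive a request's header block:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func printHeaders(fr *http2.Framer) {
	// 4096 is HPACK's initial dynamic table size; the nil emit callback is
	// fine here because the Framer installs its own via SetEmitFunc.
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	if mh, ok := f.(*http2.MetaHeadersFrame); ok {
		fmt.Println("method:", mh.PseudoValue("method")) // note: no leading colon
		for _, hf := range mh.RegularFields() {
			fmt.Printf("%s: %s\n", hf.Name, hf.Value)
		}
	}
}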
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads 0 or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
+ }
+ if !httplex.ValidHeaderFieldValue(hf.Value) {
+ invalid = headerFieldValueError(hf.Value)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+ }
+
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 000000000..2b72855f5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 000000000..47b7fae08
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build go1.7 + +package http2 + +import ( + "context" + "net" + "net/http" + "net/http/httptrace" + "time" +) + +type contextContext interface { + context.Context +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return context.WithCancel(ctx) +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req.WithContext(ctx) +} + +type clientTrace httptrace.ClientTrace + +func reqContext(r *http.Request) context.Context { return r.Context() } + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *clientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *clientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *clientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *clientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *clientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func requestTrace(req *http.Request) *clientTrace { + trace := httptrace.ContextClientTrace(req.Context()) + return (*clientTrace)(trace) +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + return cc.ping(ctx) +} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 000000000..b4c52ecec --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17_not18.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,!go1.8 + +package http2 + +import "crypto/tls" + +// temporary copy of Go 1.7's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 000000000..633202c39 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go18.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "io" + "net/http" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } + +var _ http.Pusher = (*responseWriter)(nil) + +// Push implements http.Pusher. +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + internalOpts := pushOptions{} + if opts != nil { + internalOpts.Method = opts.Method + internalOpts.Header = opts.Header + } + return w.push(target, internalOpts) +} + +func configureServer18(h1 *http.Server, h2 *Server) error { + if h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil && panicValue != http.ErrAbortHandler +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return req.GetBody +} + +func reqBodyIsNoBody(body io.ReadCloser) bool { + return body == http.NoBody +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000..9933c9f8c --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
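curGoroutineID above leans on the fact that a runtime stack dump begins with a line like "goroutine 4707 [running]:", and parseUintBytes is strconv.ParseUint ported to []byte so that prefix can be parsed without allocating a string. A standalone sketch of the input it sees (editor's illustration, not patch content):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// The first line of a stack dump carries the goroutine ID between
	// "goroutine " and the following space; that is all curGoroutineID parses.
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	fmt.Printf("%q\n", buf) // e.g. "goroutine 1 [running]:\nmain.main()\n..."
}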
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 000000000..c2805f6ac
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 000000000..f9bb03398
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This protects the encoder from an overly large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
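NewEncoder's buffering model is easiest to see in use: every WriteField performs a single Write on the underlying writer, and Sensitive fields are kept out of the dynamic table. An editor's sketch, not part of the patch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Sensitive fields are emitted as "Never Indexed" so intermediaries
	// won't cache them in their dynamic tables.
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "authorization", Value: "secret", Sensitive: true},
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Printf("% x\n", buf.Bytes()) // the raw HPACK header block
}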
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	for idx, hf := range staticTable {
+		if !constantTimeStringCompare(hf.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(idx + 1)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(hf.Value, f.Value) {
+			continue
+		}
+		i = uint64(idx + 1)
+		nameValueMatch = true
+		return
+	}
+
+	j, nameValueMatch := e.dynTab.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		i = j + uint64(len(staticTable))
+	}
+	return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same size as the default dynamic header table
+// size described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring to an indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+	if sensitive {
+		return 0x10
+	}
+	if indexing {
+		return 0x40
+	}
+	return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 000000000..135b9f62c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. 
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+	d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+	d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+	d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+	d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+	// ents is the FIFO described at
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	// The newest (low index) is appended at the end, and items are
+	// evicted from the front.
+	ents           []HeaderField
+	size           uint32
+	maxSize        uint32 // current maxSize
+	allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+	dt.maxSize = v
+	dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+//
+// Then make add increment the size. maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+	dt.ents = append(dt.ents, f)
+	dt.size += f.Size()
+	dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+	base := dt.ents // keep base pointer of slice
+	for dt.size > dt.maxSize {
+		dt.size -= dt.ents[0].Size()
+		dt.ents = dt.ents[1:]
+	}
+
+	// Shift slice contents down if we evicted things.
+	if len(dt.ents) != len(base) {
+		copy(base, dt.ents)
+		dt.ents = base[:len(dt.ents)]
+	}
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
+func constantTimeStringCompare(a, b string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	c := byte(0)
+
+	for i := 0; i < len(a); i++ {
+		c |= a[i] ^ b[i]
+	}
+
+	return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
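+//
+// For example, with ents == [e0, e1, e2] (e2 added last), a query
+// matching e2 yields i == 1: entries are numbered newest to oldest,
+// and the encoder's searchTable adds len(staticTable) to translate
+// this into an HPACK header table index.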
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+	l := len(dt.ents)
+	for j := l - 1; j >= 0; j-- {
+		ent := dt.ents[j]
+		if !constantTimeStringCompare(ent.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(l - j)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(ent.Value, f.Value) {
+			continue
+		}
+		i = uint64(l - j)
+		nameValueMatch = true
+		return
+	}
+	return
+}
+
+func (d *Decoder) maxTableIndex() int {
+	return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+	if i < 1 {
+		return
+	}
+	if i > uint64(d.maxTableIndex()) {
+		return
+	}
+	if i <= uint64(len(staticTable)) {
+		return staticTable[i-1], true
+	}
+	dents := d.dynTab.ents
+	return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+	var hf []HeaderField
+	saveFunc := d.emit
+	defer func() { d.emit = saveFunc }()
+	d.emit = func(f HeaderField) { hf = append(hf, f) }
+	if _, err := d.Write(p); err != nil {
+		return nil, err
+	}
+	if err := d.Close(); err != nil {
+		return nil, err
+	}
+	return hf, nil
+}
+
+func (d *Decoder) Close() error {
+	if d.saveBuf.Len() > 0 {
+		d.saveBuf.Reset()
+		return DecodingError{errors.New("truncated headers")}
+	}
+	return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		// Prevent state machine CPU attacks (making us redo
+		// work up to the point of finding out we don't have
+		// enough data)
+		return
+	}
+	// Only copy the data if we have to. Optimistically assume
+	// that p will contain a complete header block.
+	if d.saveBuf.Len() == 0 {
+		d.buf = p
+	} else {
+		d.saveBuf.Write(p)
+		d.buf = d.saveBuf.Bytes()
+		d.saveBuf.Reset()
+	}
+
+	for len(d.buf) > 0 {
+		err = d.parseHeaderFieldRepr()
+		if err == errNeedMore {
+			// Extra paranoia, making sure saveBuf won't
+			// get too large. All the varint and string
+			// reading code earlier should already catch
+			// overlong things and return ErrStringLength,
+			// but keep this as a last resort.
+			const varIntOverhead = 8 // conservative
+			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+				return 0, ErrStringLength
+			}
+			d.saveBuf.Write(d.buf)
+			return len(p), nil
+		}
+		if err != nil {
+			break
+		}
+	}
+	return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+	indexedTrue indexType = iota
+	indexedFalse
+	indexedNever
+)
+
+func (v indexType) indexed() bool   { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+	b := d.buf[0]
+	switch {
+	case b&128 != 0:
+		// Indexed representation.
+		// High bit set?
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
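+//
+// Worked example (RFC 7541, C.1.2): encoding 1337 with n == 5 yields
+// the octets 31, 154, 10. The 5-bit prefix saturates at 2^5-1 = 31,
+// and the remainder 1337-31 = 1306 is emitted in 7-bit groups, least
+// significant first: 0b0011010 with the continuation bit set (154),
+// then 0b0001010 (10). readVarInt reverses exactly that layout.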
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 000000000..8850e3946
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return 0, err
+	}
+	return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+	n := rootHuffmanNode
+	// cur is the bit buffer that has not been fed into n.
+	// cbits is the number of low order bits in cur that are valid.
+	// sbits is the number of bits of the symbol prefix being decoded.
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..b9283a023
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = [...]HeaderField{ + pair(":authority", ""), // index 1 (1-based) + pair(":method", "GET"), + pair(":method", "POST"), + pair(":path", "/"), + pair(":path", "/index.html"), + pair(":scheme", "http"), + pair(":scheme", "https"), + pair(":status", "200"), + pair(":status", "204"), + pair(":status", "206"), + pair(":status", "304"), + pair(":status", "400"), + pair(":status", "404"), + pair(":status", "500"), + pair("accept-charset", ""), + pair("accept-encoding", "gzip, deflate"), + pair("accept-language", ""), + pair("accept-ranges", ""), + pair("accept", ""), + pair("access-control-allow-origin", ""), + pair("age", ""), + pair("allow", ""), + pair("authorization", ""), + pair("cache-control", ""), + pair("content-disposition", ""), + pair("content-encoding", ""), + pair("content-language", ""), + pair("content-length", ""), + pair("content-location", ""), + pair("content-range", ""), + pair("content-type", ""), + pair("cookie", ""), + pair("date", ""), + pair("etag", ""), + pair("expect", ""), + pair("expires", ""), + pair("from", ""), + pair("host", ""), + pair("if-match", ""), + pair("if-modified-since", ""), + pair("if-none-match", ""), + pair("if-range", ""), + pair("if-unmodified-since", ""), + pair("last-modified", ""), + pair("link", ""), + pair("location", ""), + pair("max-forwards", ""), + pair("proxy-authenticate", ""), + pair("proxy-authorization", ""), + pair("range", ""), + pair("referer", ""), + pair("refresh", ""), + pair("retry-after", ""), + pair("server", ""), + pair("set-cookie", ""), + pair("strict-transport-security", ""), + pair("transfer-encoding", ""), + pair("user-agent", ""), + pair("vary", ""), + pair("via", ""), + pair("www-authenticate", ""), +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 
0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..b6b0f9ad1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,387 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
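+//
+// A setup sketch (assuming handler is an http.Handler and a TLS key
+// pair exists at cert.pem/key.pem):
+//
+//	srv := &http.Server{Addr: ":8443", Handler: handler}
+//	if err := http2.ConfigureServer(srv, nil); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))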
+// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
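+// For instance, Setting{SettingMaxFrameSize, 1024}.Valid() reports
+// ConnectionError(ErrCodeProtocol), since 1024 is below the 16384
+// minimum, while settings with unknown IDs are ignored and report nil.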
+func (s Setting) Valid() error {
+	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+	switch s.ID {
+	case SettingEnablePush:
+		if s.Val != 1 && s.Val != 0 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	case SettingInitialWindowSize:
+		if s.Val > 1<<31-1 {
+			return ConnectionError(ErrCodeFlowControl)
+		}
+	case SettingMaxFrameSize:
+		if s.Val < 16384 || s.Val > 1<<24-1 {
+			return ConnectionError(ErrCodeProtocol)
+		}
+	}
+	return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+	SettingHeaderTableSize      SettingID = 0x1
+	SettingEnablePush           SettingID = 0x2
+	SettingMaxConcurrentStreams SettingID = 0x3
+	SettingInitialWindowSize    SettingID = 0x4
+	SettingMaxFrameSize         SettingID = 0x5
+	SettingMaxHeaderListSize    SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
+	SettingEnablePush:           "ENABLE_PUSH",
+	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
+	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
+	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+	if v, ok := settingName[s]; ok {
+		return v
+	}
+	return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
+	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+	if len(v) == 0 {
+		return false
+	}
+	for _, r := range v {
+		if !httplex.IsTokenRune(r) {
+			return false
+		}
+		if 'A' <= r && r <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
+
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+	for i := 100; i <= 999; i++ {
+		if v := http.StatusText(i); v != "" {
+			httpCodeStringCommon[i] = strconv.Itoa(i)
+		}
+	}
+}
+
+func httpCodeString(code int) string {
+	if s, ok := httpCodeStringCommon[code]; ok {
+		return s
+	}
+	return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and initialized there without a separate allocation.
+func (cw *closeWaiter) Init() {
+	*cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+	close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+	<-cw
+}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
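+//
+// Rough lifecycle sketch (assuming c is a net.Conn and b1, b2 are
+// frame bytes):
+//
+//	bw := newBufferedWriter(c)
+//	bw.Write(b1) // checks a *bufio.Writer out of bufWriterPool
+//	bw.Write(b2) // further writes reuse it
+//	bw.Flush()   // flushes to c, returns the bufio.Writer to the pool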
+type bufferedWriter struct {
+	w  io.Writer     // immutable
+	bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+	return &bufferedWriter{w: w}
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
+var bufWriterPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
+	},
+}
+
+func (w *bufferedWriter) Available() int {
+	if w.bw == nil {
+		return bufWriterPoolBufferSize
+	}
+	return w.bw.Available()
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+	if w.bw == nil {
+		bw := bufWriterPool.Get().(*bufio.Writer)
+		bw.Reset(w.w)
+		w.bw = bw
+	}
+	return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+	bw := w.bw
+	if bw == nil {
+		return nil
+	}
+	err := bw.Flush()
+	bw.Reset(nil)
+	bufWriterPool.Put(bw)
+	w.bw = nil
+	return err
+}
+
+func mustUint31(v int32) uint32 {
+	if v < 0 || v > 2147483647 {
+		panic("out of range")
+	}
+	return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+type httpError struct {
+	msg     string
+	timeout bool
+}
+
+func (e *httpError) Error() string   { return e.msg }
+func (e *httpError) Timeout() bool   { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+	ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+	v []string // owned by sorter
+}
+
+func (s *sorter) Len() int           { return len(s.v) }
+func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+	keys := s.v[:0]
+	for k := range h {
+		keys = append(keys, k)
+	}
+	s.v = keys
+	sort.Sort(s)
+	return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which sorter owns, so
+	// stash it away while we sort the user's buffer.
+	save := s.v
+	s.v = ss
+	sort.Sort(s)
+	s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// *) a non-empty string starting with '/', but not with "//",
+// *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847 +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 000000000..efd2e1282 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "crypto/tls" + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 000000000..140434a79 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. 
+} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 000000000..efbf83c32 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..53b7a1daf --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,153 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. 
+ breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..3c6b90ccd --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2753 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. 
PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + if err := configureServer18(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. + const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. 
+ Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + wantStartPushCh: make(chan startPushRequest, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here so that it is not applied to additional + // streams opened on this connection. + // TODO: implement WriteTimeout fully. See Issue 18437. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. 
An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wantStartPushCh chan startPushRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func(int) // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimerCh <-chan time.Time // nil until used + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + idleTimerCh <-chan time.Time // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. 
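+//
+// As a rough sketch of the transitions driven by this file (RFC 7540,
+// Section 5.1): a client HEADERS frame creates the stream in stateOpen
+// (or stateHalfClosedRemote if it carried END_STREAM); the request
+// body ending moves it to stateHalfClosedRemote; the handler
+// finishing, a write error, or RST_STREAM moves it to stateClosed.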
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + reqBuf []byte // if non-nil, body pipe buffer to return later at EOF + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
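+	// Panics in http.Handler code are recovered separately, in
+	// runHandler below.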
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + + // TODO: more actual settings, notably + // SettingInitialWindowSize, but then we also + // want to bump up the conn window size the + // same amount here right after the settings + }, + }) + sc.unackedSettings++ + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) + defer sc.idleTimer.Stop() + sc.idleTimerCh = sc.idleTimer.C + } + + var gracefulShutdownCh <-chan struct{} + if sc.hs != nil { + gracefulShutdownCh = h1ServerShutdownChan(sc.hs) + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.NewTimer(firstSettingsTimeout) + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + sc.writeFrame(wr) + case spr := <-sc.wantStartPushCh: + sc.startPush(spr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer.C != nil { + settingsTimer.Stop() + settingsTimer.C = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case <-settingsTimer.C: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case <-gracefulShutdownCh: + gracefulShutdownCh = nil + sc.startGracefulShutdown() + case <-sc.shutdownTimerCh: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case <-sc.idleTimerCh: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case fn := <-sc.testHookCh: + fn(loopNum) + } + + if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { + return + } + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? 
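+	// The greeting being matched is the fixed 24-octet sequence
+	// "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" (ClientPreface, RFC 7540
+	// Section 3.5).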
+ defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. +// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. 
The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. 
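+//
+// Only the serve goroutine starts frame writes (the write itself may
+// run on writeFrameAsync's goroutine), and at most one write is in
+// flight at a time: wroteFrame clears sc.writingFrame and re-arms
+// scheduleFrameWrite to pick the next frame.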
+func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + sc.resetStream(streamError(st.id, ErrCodeCancel)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the +// client we're gracefully shutting down. The connection isn't closed +// until all current streams are done. 
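+//
+// The shutdown sequence, as a sketch: a GOAWAY with ErrCodeNo is
+// queued, in-flight streams are allowed to finish, and the serve
+// loop's post-select check (inGoAway with zero open streams and
+// nothing left to write) returns, triggering the deferred conn
+// cleanup.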
+func (sc *serverConn) startGracefulShutdown() { + sc.goAwayIn(ErrCodeNo, 0) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + var forceCloseIn time.Duration + if code != ErrCodeNo { + forceCloseIn = 250 * time.Millisecond + } else { + // TODO: configurable + forceCloseIn = 1 * time.Second + } + sc.goAwayIn(code, forceCloseIn) +} + +func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { + sc.serveG.check() + if sc.inGoAway { + return + } + if forceCloseIn != 0 { + sc.shutDownIn(forceCloseIn) + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.NewTimer(d) + sc.shutdownTimerCh = sc.shutdownTimer.C +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. 
Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdown() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. 
+ // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialWindowSize + sc.initialWindowSize = int32(val) + growth := sc.initialWindowSize - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." 
+ return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdown() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. 
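+//
+// A handler that wants trailers must drain the body first; the
+// pre-declared keys are filled in at io.EOF. A hedged sketch, where
+// "Foo" is a trailer the client announced in its Trailer header:
+//
+//	io.Copy(ioutil.Discard, r.Body) // trailers are copied at EOF
+//	v := r.Trailer.Get("Foo")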
+func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. 
Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + st.reqBuf = getRequestBodyBuf() + req.Body.(*requestBody).pipe = &pipe{ + b: &fixedBuffer{buf: st.reqBuf}, + } + + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +var reqBodyCache = make(chan []byte, 8) + +func getRequestBodyBuf() []byte { + select { + case b := <-reqBodyCache: + return b + default: + return make([]byte, initialWindowSize) + } +} + +func putRequestBodyBuf(b []byte) { + select { + case reqBodyCache <- b: + default: + } +} + +// Run on its own goroutine. 
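+// If the handler panics, the deferred func below queues a RST_STREAM
+// for just that stream (handlerPanicRST) rather than tearing down the
+// whole connection, mirroring net/http's per-request panic recovery.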
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } + if err == io.EOF { + if buf := st.reqBuf; buf != nil { + st.reqBuf = nil // shouldn't matter; field unused by other + putRequestBodyBuf(buf) + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. 
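+//
+// One consequence: a handler can force the response HEADERS onto the
+// wire before any body bytes exist, since Flush ends up calling
+// writeChunk(nil). A hedged sketch from inside a handler:
+//
+//	w.WriteHeader(http.StatusOK)
+//	w.(http.Flusher).Flush() // HEADERS frame sent, no DATA yet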
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. 
That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+	for k, vv := range rws.handlerHeader {
+		if !strings.HasPrefix(k, TrailerPrefix) {
+			continue
+		}
+		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+		rws.declareTrailer(trailerKey)
+		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+	}
+
+	if len(rws.trailers) > 1 {
+		sorter := sorterPool.Get().(*sorter)
+		sorter.SortStrings(rws.trailers)
+		sorterPool.Put(sorter)
+	}
+}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Flush called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk with zero bytes), so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		cw := rws.stream.cw
+		go func() {
+			cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+	rws := w.rws
+	if rws == nil {
+		panic("WriteHeader called after Handler finished")
+	}
+	rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+	if !rws.wroteHeader {
+		rws.wroteHeader = true
+		rws.status = code
+		if len(rws.handlerHeader) > 0 {
+			rws.snapHeader = cloneHeader(rws.handlerHeader)
+		}
+	}
+}
+
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+	return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+	return w.write(len(s), nil, s)
+}
+
+// either dataB or dataS is non-zero.
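
From a handler's point of view, the TrailerPrefix mechanism described above
looks like the following. This is a hedged sketch: the trailer name is
invented, and it assumes the response is served over a path that supports
trailers, such as this HTTP/2 server code.

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
    	w.Header().Set("Content-Type", "text/plain")
    	fmt.Fprintln(w, "body...") // response headers are snapshotted and sent here
    	// Too late for a normal header or a predeclared trailer, but the
    	// "Trailer:" prefix marks it for promotion after the handler returns:
    	w.Header().Set("Trailer:My-Checksum", "abc123")
    }

    func main() {
    	http.ListenAndServe("localhost:8080", http.HandlerFunc(handler))
    }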
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + rws.handlerDone = true + w.Flush() + w.rws = nil + responseWriterStatePool.Put(rws) +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.wantStartPushCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+		// A server that is unable to establish a new stream identifier can send a GOAWAY
+		// frame so that the client is forced to open a new connection for new streams.
+		if sc.maxPushPromiseID+2 >= 1<<31 {
+			sc.startGracefulShutdown()
+			return 0, ErrPushLimitReached
+		}
+		sc.maxPushPromiseID += 2
+		promisedID := sc.maxPushPromiseID
+
+		// http://tools.ietf.org/html/rfc7540#section-8.2.
+		// Strictly speaking, the new stream should start in "reserved (local)", then
+		// transition to "half closed (remote)" after sending the initial HEADERS, but
+		// we start in "half closed (remote)" for simplicity.
+		// See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerShutdownChan returns a channel that will be closed when the +// provided *http.Server wants to shut down. +// +// This is a somewhat hacky way to get at http1 innards. It works +// when the http2 code is bundled into the net/http package in the +// standard library. The alternatives ended up making the cmd/go tool +// depend on http Servers. This is the lightest option for now. +// This is tested via the TestServeShutdown* tests in net/http. 
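
h1ServerShutdownChan and h1ServerKeepAlivesDisabled, which follow, both rely
on the same bundling trick: type-assert the *http.Server against a locally
declared interface that names an unexported method. The assertion can only
succeed when this code is compiled into net/http itself, where that method
exists. A toy illustration of the pattern (all names invented):

    package main

    import "fmt"

    type bundledServer struct{} // stands in for *http.Server inside net/http

    func (bundledServer) doKeepAlives() bool { return true }

    type plainServer struct{} // stands in for *http.Server seen from outside

    func keepAlivesDisabled(v interface{}) bool {
    	type keepAliver interface {
    		doKeepAlives() bool
    	}
    	if s, ok := v.(keepAliver); ok {
    		return !s.doKeepAlives()
    	}
    	return false // assertion failed: can't tell, assume keep-alives are on
    }

    func main() {
    	fmt.Println(keepAlivesDisabled(bundledServer{})) // false
    	fmt.Println(keepAlivesDisabled(plainServer{}))   // false (no method)
    }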
+func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { + if fn := testh1ServerShutdownChan; fn != nil { + return fn(hs) + } + var x interface{} = hs + type I interface { + getDoneChan() <-chan struct{} + } + if hs, ok := x.(I); ok { + return hs.getDoneChan() + } + return nil +} + +// optional test hook for h1ServerShutdownChan. +var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..0c7e859db --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2129 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. 
+	AllowHTTP bool
+
+	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+	// send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+	// interprets the highest possible value here (0xffffffff or 1<<32-1)
+	// to mean no limit.
+	MaxHeaderListSize uint32
+
+	// t1, if non-nil, is the standard library Transport using
+	// this transport. Its settings are used (but not its
+	// RoundTrip method, etc).
+	t1 *http.Transport
+
+	connPoolOnce  sync.Once
+	connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+	if t.MaxHeaderListSize == 0 {
+		return 10 << 20
+	}
+	if t.MaxHeaderListSize == 0xffffffff {
+		return 0
+	}
+	return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+	_, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+	return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+	t.connPoolOnce.Do(t.initConnPool)
+	return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+	if t.ConnPool != nil {
+		t.connPoolOrDef = t.ConnPool
+	} else {
+		t.connPoolOrDef = &clientConnPool{t: t}
+	}
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+	t         *Transport
+	tconn     net.Conn             // usually *tls.Conn, except specialized impls
+	tlsState  *tls.ConnectionState // nil only for specialized impls
+	singleUse bool                 // whether being used for a single http.Request
+
+	// readLoop goroutine fields:
+	readerDone chan struct{} // closed on error
+	readerErr  error         // set before readerDone is closed
+
+	idleTimeout time.Duration // or 0 for never
+	idleTimer   *time.Timer
+
+	mu              sync.Mutex // guards following
+	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
+	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
+	inflow          flow       // peer's conn-level flow control
+	closed          bool
+	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
+	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
+	goAwayDebug     string                   // goAway frame's debug data, retained as a string
+	streams         map[uint32]*clientStream // client-initiated
+	nextStreamID    uint32
+	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
+	bw              *bufio.Writer
+	br              *bufio.Reader
+	fr              *Framer
+	lastActive      time.Time
+	// Settings from peer: (also guarded by mu)
+	maxFrameSize         uint32
+	maxConcurrentStreams uint32
+	initialWindowSize    uint32
+
+	hbuf    bytes.Buffer // HPACK encoder writes into this
+	henc    *hpack.Encoder
+	freeBuf [][]byte
+
+	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
+	werr error      // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
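
The MaxHeaderListSize field above has slightly unusual semantics, which
maxHeaderListSize (also above) encodes: 0 selects a 10MB default, and the
sentinel 0xffffffff is advertised as "no limit". A standalone restatement
of just that mapping (the helper name is hypothetical):

    package main

    import "fmt"

    // effectiveMaxHeaderListSize mirrors Transport.maxHeaderListSize above.
    func effectiveMaxHeaderListSize(configured uint32) uint32 {
    	if configured == 0 {
    		return 10 << 20 // unset: use the 10MB default
    	}
    	if configured == 0xffffffff {
    		return 0 // sentinel: advertise no limit at all
    	}
    	return configured
    }

    func main() {
    	for _, v := range []uint32{0, 1 << 20, 0xffffffff} {
    		fmt.Printf("configured=%d -> advertised=%d\n", v, effectiveMaxHeaderListSize(v))
    	}
    }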
+type clientStream struct {
+	cc            *ClientConn
+	req           *http.Request
+	trace         *clientTrace // or nil
+	ID            uint32
+	resc          chan resAndError
+	bufPipe       pipe // buffered pipe with the flow-controlled response payload
+	startedWrite  bool // started request body write; guarded by cc.mu
+	requestedGzip bool
+	on100         func() // optional code to run if get a 100 continue response
+
+	flow        flow  // guarded by cc.mu
+	inflow      flow  // guarded by cc.mu
+	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+	readErr     error // sticky read error; owned by transportResponseBody.Read
+	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu
+
+	peerReset chan struct{} // closed on peer reset
+	resetErr  error         // populated before peerReset is closed
+
+	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
+
+	// owned by clientConnReadLoop:
+	firstByte    bool // got the first response byte
+	pastHeaders  bool // got first MetaHeadersFrame (actual headers)
+	pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+	trailer    http.Header  // accumulated trailers
+	resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc).
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+	ctx := reqContext(req)
+	if req.Cancel == nil && ctx.Done() == nil {
+		return
+	}
+	select {
+	case <-req.Cancel:
+		cs.cancelStream()
+		cs.bufPipe.CloseWithError(errRequestCanceled)
+	case <-ctx.Done():
+		cs.cancelStream()
+		cs.bufPipe.CloseWithError(ctx.Err())
+	case <-cs.done:
+	}
+}
+
+func (cs *clientStream) cancelStream() {
+	cs.cc.mu.Lock()
+	didReset := cs.didReset
+	cs.didReset = true
+	cs.cc.mu.Unlock()
+
+	if !didReset {
+		cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+	}
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+	select {
+	case <-cs.peerReset:
+		return cs.resetErr
+	case <-cs.done:
+		return errStreamClosed
+	default:
+		return nil
+	}
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+	if err == nil {
+		panic("nil error")
+	}
+	cc := cs.cc
+	cc.mu.Lock()
+	cs.stopReqBody = err
+	cc.cond.Broadcast()
+	cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+	w   io.Writer
+	err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+	if *sew.err != nil {
+		return 0, *sew.err
+	}
+	n, err = sew.w.Write(p)
+	*sew.err = err
+	return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+	// OnlyCachedConn controls whether RoundTripOpt may
+	// create a new TCP connection. If set true and
+	// no cached connection is available, RoundTripOpt
+	// will return ErrNoCachedConn.
+	OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port /
+// ip:port) to a host:port. The port 443 is added if needed.
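
stickyErrWriter above is a small but load-bearing idiom: after the first
failed Write, every later Write short-circuits with the same error, so a
batch of frame writes can be checked once via cc.werr instead of after
every call. A self-contained demonstration (the failing writer and its
name are invented):

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    )

    type stickyErrWriter struct {
    	w   io.Writer
    	err *error
    }

    func (sew stickyErrWriter) Write(p []byte) (int, error) {
    	if *sew.err != nil {
    		return 0, *sew.err // error latched: refuse further writes
    	}
    	n, err := sew.w.Write(p)
    	*sew.err = err
    	return n, err
    }

    // failAfter succeeds n times, then fails forever.
    type failAfter struct{ n int }

    func (f *failAfter) Write(p []byte) (int, error) {
    	if f.n--; f.n < 0 {
    		return 0, errors.New("broken pipe")
    	}
    	return len(p), nil
    }

    func main() {
    	var werr error
    	w := stickyErrWriter{&failAfter{n: 1}, &werr}
    	w.Write([]byte("ok"))    // succeeds
    	w.Write([]byte("boom"))  // fails and latches werr
    	w.Write([]byte("later")) // short-circuits with the same error
    	fmt.Println(werr)        // broken pipe
    }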
+func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, err := cc.RoundTrip(req) + if err != nil { + if req, err = shouldRetryRequest(req, err); err == nil { + continue + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { + switch err { + default: + return nil, err + case errClientConnUnusable, errClientConnGotGoAway: + return req, nil + case errClientConnGotGoAwayAfterSomeReqBody: + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. 
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. 
+	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+	cc.br = bufio.NewReader(c)
+	cc.fr = NewFramer(cc.bw, cc.br)
+	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+	// henc in response to SETTINGS frames?
+	cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+	if cs, ok := c.(connectionStater); ok {
+		state := cs.ConnectionState()
+		cc.tlsState = &state
+	}
+
+	initialSettings := []Setting{
+		{ID: SettingEnablePush, Val: 0},
+		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+	}
+	if max := t.maxHeaderListSize(); max != 0 {
+		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+	}
+
+	cc.bw.Write(clientPreface)
+	cc.fr.WriteSettings(initialSettings...)
+	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+	cc.bw.Flush()
+	if cc.werr != nil {
+		return nil, cc.werr
+	}
+
+	go cc.readLoop()
+	return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	old := cc.goAway
+	cc.goAway = f
+
+	// Merge the previous and current GoAway error frames.
+	if cc.goAwayDebug == "" {
+		cc.goAwayDebug = string(f.DebugData())
+	}
+	if old != nil && old.ErrCode != ErrCodeNo {
+		cc.goAway.ErrCode = old.ErrCode
+	}
+	last := f.LastStreamID
+	for streamID, cs := range cc.streams {
+		if streamID > last {
+			select {
+			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
+			default:
+			}
+		}
+	}
+}
+
+func (cc *ClientConn) CanTakeNewRequest() bool {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	if cc.singleUse && cc.nextStreamID > 1 {
+		return false
+	}
+	return cc.goAway == nil && !cc.closed &&
+		int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+		cc.nextStreamID < math.MaxInt32
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+	cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeIfIdle() {
+	cc.mu.Lock()
+	if len(cc.streams) > 0 {
+		cc.mu.Unlock()
+		return
+	}
+	cc.closed = true
+	nextID := cc.nextStreamID
+	// TODO: do clients send GOAWAY too? maybe? Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
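
frameScratchBuffer and putFrameScratchBuffer, which follow, implement a
deliberately tiny free list instead of a sync.Pool: at most four DATA
scratch buffers are retained per connection, and allocations are capped at
maxAllocFrameSize. A simplified, single-goroutine sketch of the same policy
(the real methods hold cc.mu; names and the eviction detail here are
invented):

    package main

    import "fmt"

    const (
    	maxAllocFrameSize = 512 << 10
    	maxFreeBufs       = 4
    )

    var freeBufs [][]byte

    // getBuf reuses any retained buffer that is large enough, else
    // allocates a new one capped at maxAllocFrameSize.
    func getBuf(size int) []byte {
    	if size > maxAllocFrameSize {
    		size = maxAllocFrameSize
    	}
    	for i, b := range freeBufs {
    		if len(b) >= size {
    			freeBufs[i] = freeBufs[len(freeBufs)-1]
    			freeBufs = freeBufs[:len(freeBufs)-1]
    			return b[:size]
    		}
    	}
    	return make([]byte, size)
    }

    // putBuf retains at most maxFreeBufs buffers; extras go to the GC.
    func putBuf(b []byte) {
    	if len(freeBufs) < maxFreeBufs {
    		freeBufs = append(freeBufs, b[:cap(b)])
    	}
    }

    func main() {
    	b := getBuf(16 << 10)
    	putBuf(b)
    	fmt.Println(len(freeBufs)) // 1
    }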
+func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + cc.mu.Unlock() + return nil, errClientConnUnusable + } + + body := req.Body + hasBody := body != nil + contentLen := actualContentLength(req) + + // TODO(bradfitz): this is a copy of the logic in net/http. 
Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
+ bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + if re.err == errClientConnGotGoAway { + cc.mu.Lock() + if cs.startedWrite { + re.err = errClientConnGotGoAwayAfterSomeReqBody + } + cc.mu.Unlock() + } + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-ctx.Done(): + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, ctx.Err() + case <-req.Cancel: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. 
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + defer cc.mu.Unlock() + trls = cc.encodeTrailers(req) + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. 
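
The clamping in awaitFlowControl above reduces to taking the minimum of
three quantities: the stream's available flow-control tokens, the number of
bytes the caller has buffered, and the peer's SETTINGS_MAX_FRAME_SIZE.
Restated as a standalone helper (the name is hypothetical):

    package main

    import "fmt"

    // takeSize mirrors the clamping in awaitFlowControl: never send more
    // than the stream window allows, more than is buffered, or more than
    // one DATA frame may carry.
    func takeSize(available int32, maxBytes int, maxFrameSize uint32) int32 {
    	take := available
    	if int(take) > maxBytes {
    		take = int32(maxBytes)
    	}
    	if take > int32(maxFrameSize) {
    		take = int32(maxFrameSize)
    	}
    	return take
    }

    func main() {
    	fmt.Println(takeSize(1<<20, 64<<10, 16384)) // 16384: the frame size wins
    }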
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) + cc.writeHeader(":method", req.Method) + if req.Method != "CONNECT" { + cc.writeHeader(":path", path) + cc.writeHeader(":scheme", req.URL.Scheme) + } + if trailers != "" { + cc.writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. 
+	switch method {
+	case "POST", "PUT", "PATCH":
+		return true
+	default:
+		return false
+	}
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+	cc.hbuf.Reset()
+	for k, vv := range req.Trailer {
+		// Transfer-Encoding, etc. have already been filtered at the
+		// start of RoundTrip
+		lowKey := strings.ToLower(k)
+		for _, v := range vv {
+			cc.writeHeader(lowKey, v)
+		}
+	}
+	return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+	if VerboseLogs {
+		log.Printf("http2: Transport encoding header %q = %q", name, value)
+	}
+	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+	res *http.Response
+	err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+	cs := &clientStream{
+		cc:        cc,
+		ID:        cc.nextStreamID,
+		resc:      make(chan resAndError, 1),
+		peerReset: make(chan struct{}),
+		done:      make(chan struct{}),
+	}
+	cs.flow.add(int32(cc.initialWindowSize))
+	cs.flow.setConnFlow(&cc.flow)
+	cs.inflow.add(transportDefaultStreamFlow)
+	cs.inflow.setConnFlow(&cc.inflow)
+	cc.nextStreamID += 2
+	cc.streams[cs.ID] = cs
+	return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+	cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cs := cc.streams[id]
+	if andRemove && cs != nil && !cc.closed {
+		cc.lastActive = time.Now()
+		delete(cc.streams, id)
+		if len(cc.streams) == 0 && cc.idleTimer != nil {
+			cc.idleTimer.Reset(cc.idleTimeout)
+		}
+		close(cs.done)
+		cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+	cc            *ClientConn
+	activeRes     map[uint32]*clientStream // keyed by streamID
+	closeWhenIdle bool
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+	rl := &clientConnReadLoop{
+		cc:        cc,
+		activeRes: make(map[uint32]*clientStream),
+	}
+
+	defer rl.cleanup()
+	cc.readerErr = rl.run()
+	if ce, ok := cc.readerErr.(ConnectionError); ok {
+		cc.wmu.Lock()
+		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+		cc.wmu.Unlock()
+	}
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type GoAwayError struct {
+	LastStreamID uint32
+	ErrCode      ErrCode
+	DebugData    string
+}
+
+func (e GoAwayError) Error() string {
+	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+		e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func isEOFOrNetReadError(err error) bool {
+	if err == io.EOF {
+		return true
+	}
+	ne, ok := err.(*net.OpError)
+	return ok && ne.Op == "read"
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+	cc := rl.cc
+	defer cc.tconn.Close()
+	defer cc.t.connPool().MarkDead(cc)
+	defer close(cc.readerDone)
+
+	if cc.idleTimer != nil {
+		cc.idleTimer.Stop()
+	}
+
+	// Close any response bodies if the server closes prematurely.
+	// TODO: also do this if we've written the headers but not
+	// gotten a response yet.
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. 
+		return nil
+	}
+	if res.Body != noBody {
+		rl.activeRes[cs.ID] = cs
+	}
+	cs.resTrailer = &res.Trailer
+	cs.resc <- resAndError{res: res}
+	return nil
+}
+
+// may return error types nil, or ConnectionError. Any other error value
+// is a StreamError of type ErrCodeProtocol. The returned error in that case
+// is the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+	if f.Truncated {
+		return nil, errResponseHeaderListSize
+	}
+
+	status := f.PseudoValue("status")
+	if status == "" {
+		return nil, errors.New("missing status pseudo header")
+	}
+	statusCode, err := strconv.Atoi(status)
+	if err != nil {
+		return nil, errors.New("malformed non-numeric status pseudo header")
+	}
+
+	if statusCode == 100 {
+		traceGot100Continue(cs.trace)
+		if cs.on100 != nil {
+			cs.on100() // forces any write delay timer to fire
+		}
+		cs.pastHeaders = false // do it all again
+		return nil, nil
+	}
+
+	header := make(http.Header)
+	res := &http.Response{
+		Proto:      "HTTP/2.0",
+		ProtoMajor: 2,
+		Header:     header,
+		StatusCode: statusCode,
+		Status:     status + " " + http.StatusText(statusCode),
+	}
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		if key == "Trailer" {
+			t := res.Trailer
+			if t == nil {
+				t = make(http.Header)
+				res.Trailer = t
+			}
+			foreachHeaderElement(hf.Value, func(v string) {
+				t[http.CanonicalHeaderKey(v)] = nil
+			})
+		} else {
+			header[key] = append(header[key], hf.Value)
+		}
+	}
+
+	streamEnded := f.StreamEnded()
+	isHead := cs.req.Method == "HEAD"
+	if !streamEnded || isHead {
+		res.ContentLength = -1
+		if clens := res.Header["Content-Length"]; len(clens) == 1 {
+			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+				res.ContentLength = clen64
+			} else {
+				// TODO: care? unlike http/1, it won't mess up our framing, so it's
+				// more safe smuggling-wise to ignore.
+			}
+		} else if len(clens) > 1 {
+			// TODO: care? unlike http/1, it won't mess up our framing, so it's
+			// more safe smuggling-wise to ignore.
+		}
+	}
+
+	if streamEnded || isHead {
+		res.Body = noBody
+		return res, nil
+	}
+
+	buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+	cs.bufPipe = pipe{b: buf}
+	cs.bytesRemain = res.ContentLength
+	res.Body = transportResponseBody{cs}
+	go cs.awaitRequestCancel(cs.req)
+
+	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+		res.Header.Del("Content-Encoding")
+		res.Header.Del("Content-Length")
+		res.ContentLength = -1
+		res.Body = &gzipReader{body: res.Body}
+		setResponseUncompressed(res)
+	}
+	return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+	if cs.pastTrailers {
+		// Too many HEADERS frames for this stream.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	cs.pastTrailers = true
+	if !f.StreamEnded() {
+		// We expect that any headers for trailers also
+		// have END_STREAM.
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if len(f.PseudoFields()) > 0 {
+		// No pseudo header fields are defined for trailers.
+		// TODO: ConnectionError might be overly harsh? Check.
+		return ConnectionError(ErrCodeProtocol)
+	}
+
+	trailer := make(http.Header)
+	for _, hf := range f.RegularFields() {
+		key := http.CanonicalHeaderKey(hf.Name)
+		trailer[key] = append(trailer[key], hf.Value)
+	}
+	cs.trailer = trailer
+
+	rl.endStream(cs)
+	return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.bufPipe.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+	cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+	cs := b.cs
+	cc := cs.cc
+
+	if cs.readErr != nil {
+		return 0, cs.readErr
+	}
+	n, err = b.cs.bufPipe.Read(p)
+	if cs.bytesRemain != -1 {
+		if int64(n) > cs.bytesRemain {
+			n = int(cs.bytesRemain)
+			if err == nil {
+				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+			}
+			cs.readErr = err
+			return int(cs.bytesRemain), err
+		}
+		cs.bytesRemain -= int64(n)
+		if err == io.EOF && cs.bytesRemain > 0 {
+			err = io.ErrUnexpectedEOF
+			cs.readErr = err
+			return n, err
+		}
+	}
+	if n == 0 {
+		// No flow control tokens to send back.
+		return
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	var connAdd, streamAdd int32
+	// Check the conn-level first, before the stream-level.
+	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+		connAdd = transportDefaultConnFlow - v
+		cc.inflow.add(connAdd)
+	}
+	if err == nil { // No need to refresh if the stream is over or failed.
+		// Consider any buffered body data (read from the conn but not
+		// consumed by the client) when computing flow control for this
+		// stream.
+		v := int(cs.inflow.available()) + cs.bufPipe.Len()
+		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+			streamAdd = int32(transportDefaultStreamFlow - v)
+			cs.inflow.add(streamAdd)
+		}
+	}
+	if connAdd != 0 || streamAdd != 0 {
+		cc.wmu.Lock()
+		defer cc.wmu.Unlock()
+		if connAdd != 0 {
+			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+		}
+		if streamAdd != 0 {
+			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+		}
+		cc.bw.Flush()
+	}
+	return
+}
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+	cs := b.cs
+	cc := cs.cc
+
+	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
+	unread := cs.bufPipe.Len()
+
+	if unread > 0 || !serverSentStreamEnd {
+		cc.mu.Lock()
+		cc.wmu.Lock()
+		if !serverSentStreamEnd {
+			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+		}
+		// Return connection-level flow control.
+		if unread > 0 {
+			cc.inflow.add(int32(unread))
+			cc.fr.WriteWindowUpdate(0, uint32(unread))
+		}
+		cc.bw.Flush()
+		cc.wmu.Unlock()
+		cc.mu.Unlock()
+	}
+
+	cs.bufPipe.BreakWithError(errClosedResponseBody)
+	return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, f.StreamEnded())
+	data := f.Data()
+	if cs == nil {
+		cc.mu.Lock()
+		neverSent := cc.nextStreamID
+		cc.mu.Unlock()
+		if f.StreamID >= neverSent {
+			// We never asked for this.
+			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+			return ConnectionError(ErrCodeProtocol)
+		}
+		// We probably did ask for this, but canceled. Just ignore it.
+		// TODO: be stricter here? only silently ignore things which
+		// we canceled, but not things which were closed normally
+		// by the peer? Tough without accumulating too much state.
+ + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if f.Length > 0 { + if len(data) > 0 && cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + cs.inflow.add(pad) + cc.inflow.add(pad) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(pad)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) + cc.bw.Flush() + cc.wmu.Unlock() + } + didReset := cs.didReset + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
+			cc.vlogf("Unhandled Setting: %v", s)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+
+	cc.fr.WriteSettingsAck()
+	cc.bw.Flush()
+	return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, false)
+	if f.StreamID != 0 && cs == nil {
+		return nil
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	fl := &cc.flow
+	if cs != nil {
+		fl = &cs.flow
+	}
+	if !fl.add(int32(f.Increment)) {
+		return ConnectionError(ErrCodeFlowControl)
+	}
+	cc.cond.Broadcast()
+	return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+	cs := rl.cc.streamByID(f.StreamID, true)
+	if cs == nil {
+		// TODO: return error if server tries to RST_STREAM an idle stream
+		return nil
+	}
+	select {
+	case <-cs.peerReset:
+		// Already reset.
+		// This is the only goroutine
+		// which closes this, so there
+		// isn't a race.
+	default:
+		err := streamError(cs.ID, f.ErrCode)
+		cs.resetErr = err
+		close(cs.peerReset)
+		cs.bufPipe.CloseWithError(err)
+		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	delete(rl.activeRes, cs.ID)
+	return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+// Public implementation is in go17.go and not_go17.go.
+func (cc *ClientConn) ping(ctx contextContext) error {
+	c := make(chan struct{})
+	// Generate a random payload
+	var p [8]byte
+	for {
+		if _, err := rand.Read(p[:]); err != nil {
+			return err
+		}
+		cc.mu.Lock()
+		// check for dup before insert
+		if _, found := cc.pings[p]; !found {
+			cc.pings[p] = c
+			cc.mu.Unlock()
+			break
+		}
+		cc.mu.Unlock()
+	}
+	cc.wmu.Lock()
+	if err := cc.fr.WritePing(false, p); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	cc.wmu.Unlock()
+	select {
+	case <-c:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-cc.readerDone:
+		// connection closed
+		return cc.readerErr
+	}
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+	if f.IsAck() {
+		cc := rl.cc
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		// If ack, notify listener if any
+		if c, ok := cc.pings[f.Data]; ok {
+			close(c)
+			delete(cc.pings, f.Data)
+		}
+		return nil
+	}
+	cc := rl.cc
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if err := cc.fr.WritePing(true, f.Data); err != nil {
+		return err
+	}
+	return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+	// We told the peer we don't want them.
+	// Spec says:
+	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+	// setting of the peer endpoint is set to 0. An endpoint that
+	// has set this setting and has received acknowledgement MUST
+	// treat the receipt of a PUSH_PROMISE frame as a connection
+	// error (Section 5.4.1) of type PROTOCOL_ERROR."
+	return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+	// TODO: map err to more interesting error codes, once the
+	// HTTP community comes up with some. But currently for
+	// RST_STREAM there's no equivalent to GOAWAY frame's debug
+	// data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. 
+ return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..6b0dfae31 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,370 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. 
+ panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + if p.code != 0 { + ctx.Flush() // ignore error: we're hanging up on them anyway + time.Sleep(50 * time.Millisecond) + ctx.CloseConn() + } + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
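+	// For example, a 40 KB header block is split into 16 KB + 16 KB + 8 KB
+	// fragments; the fn callback (e.g. writeHeaderBlock) emits HEADERS for
+	// the first and CONTINUATION for the rest, setting END_HEADERS only on
+	// the last fragment.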
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+			return err
+		}
+		first = false
+	}
+	return nil
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+	streamID    uint32
+	httpResCode int         // 0 means no ":status" line
+	h           http.Header // may be nil
+	trailers    []string    // if non-nil, which keys of h to write. nil means all.
+	endStream   bool
+
+	date          string
+	contentType   string
+	contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+	if VerboseLogs {
+		log.Printf("http2: server encoding header %q = %q", k, v)
+	}
+	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+	// TODO: this is a common one. It'd be nice to return true
+	// here and get into the fast path if we could be clever and
+	// calculate the size fast enough, or at least a conservative
+	// upper bound that usually fires. (Maybe if w.h and
+	// w.trailers are nil, so we don't need to enumerate it.)
+	// Otherwise I'm afraid that just calculating the length to
+	// answer this question would be slower than the ~2µs benefit.
+	return false
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+
+	if w.httpResCode != 0 {
+		encKV(enc, ":status", httpCodeString(w.httpResCode))
+	}
+
+	encodeHeaders(enc, w.h, w.trailers)
+
+	if w.contentType != "" {
+		encKV(enc, "content-type", w.contentType)
+	}
+	if w.contentLength != "" {
+		encKV(enc, "content-length", w.contentLength)
+	}
+	if w.date != "" {
+		encKV(enc, "date", w.date)
+	}
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 && w.trailers == nil {
+		panic("unexpected empty hpack")
+	}
+
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WriteHeaders(HeadersFrameParam{
+			StreamID:      w.streamID,
+			BlockFragment: frag,
+			EndStream:     w.endStream,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+	}
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+	streamID uint32   // pusher stream
+	method   string   // for :method
+	url      *url.URL // for :scheme, :authority, :path
+	h        http.Header
+
+	// Creates an ID for a pushed stream. This runs on serveG just before
+	// the frame is written. The returned ID is copied to promisedID.
+	allocatePromisedID func() (uint32, error)
+	promisedID         uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+	// TODO: see writeResHeaders.staysWithinBuffer
+	return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+
+	encKV(enc, ":method", w.method)
+	encKV(enc, ":scheme", w.url.Scheme)
+	encKV(enc, ":authority", w.url.Host)
+	encKV(enc, ":path", w.url.RequestURI())
+	encodeHeaders(enc, w.h, nil)
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 {
+		panic("unexpected empty hpack")
+	}
+
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WritePushPromise(PushPromiseParam{
+			StreamID:      w.streamID,
+			PromiseID:     w.promisedID,
+			BlockFragment: frag,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+	}
+}
+
+type write100ContinueHeadersFrame struct {
+	streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+	encKV(enc, ":status", "100")
+	return ctx.Framer().WriteHeaders(HeadersFrameParam{
+		StreamID:      w.streamID,
+		BlockFragment: buf.Bytes(),
+		EndStream:     false,
+		EndHeaders:    true,
+	})
+}
+
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+	// Sloppy but conservative:
+	return 9+2*(len(":status")+len("100")) <= max
+}
+
+type writeWindowUpdate struct {
+	streamID uint32 // or 0 for conn-level
+	n        uint32
+}
+
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+	if keys == nil {
+		sorter := sorterPool.Get().(*sorter)
+		// Using defer here, since the returned keys from the
+		// sorter.Keys method are only valid until the sorter
+		// is returned:
+		defer sorterPool.Put(sorter)
+		keys = sorter.Keys(h)
+	}
+	for _, k := range keys {
+		vv := h[k]
+		k = lowerHeader(k)
+		if !validWireHeaderFieldName(k) {
+			// Skip it as backup paranoia. Per
+			// golang.org/issue/14048, these should
+			// already be rejected at a higher level.
+			continue
+		}
+		isTE := k == "transfer-encoding"
+		for _, v := range vv {
+			if !httplex.ValidHeaderFieldValue(v) {
+				// TODO: return an error? golang.org/issue/14048
+				// For now just omit it.
+				continue
+			}
+			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+			if isTE && v != "trailers" {
+				continue
+			}
+			encKV(enc, k, v)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 000000000..4fe307307
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,242 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+	// OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
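+//
+// For example, with n=1000 and a queued 4000-byte DATA frame (window and
+// max frame size permitting), Consume returns a 1000-byte 'consumed'
+// frame, a 3000-byte 'rest' frame, and 2.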
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
+func (p *writeQueuePool) get() *writeQueue {
+	ln := len(*p)
+	if ln == 0 {
+		return new(writeQueue)
+	}
+	x := ln - 1
+	q := (*p)[x]
+	(*p)[x] = nil
+	*p = (*p)[:x]
+	return q
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 000000000..01132721b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,452 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+	"sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+	// MaxClosedNodesInTree controls the maximum number of closed streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	// "It is possible for a stream to become closed while prioritization
+	// information ... is in transit. ... This potentially creates suboptimal
+	// prioritization, since the stream could be given a priority that is
+	// different from what is intended. To avoid these problems, an endpoint
+	// SHOULD retain stream prioritization state for a period after streams
+	// become closed. The longer state is retained, the lower the chance that
+	// streams are assigned incorrect or default priority values."
+	MaxClosedNodesInTree int
+
+	// MaxIdleNodesInTree controls the maximum number of idle streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	// Similarly, streams that are in the "idle" state can be assigned
+	// priority or become a parent of other streams. This allows for the
+	// creation of a grouping node in the dependency tree, which enables
+	// more flexible expressions of priority. Idle streams begin with a
+	// default priority (Section 5.3.5).
+	MaxIdleNodesInTree int
+
+	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
+	// data is delivered in priority order. This works around a race where
+	// stream B depends on stream A and both streams are about to call Write
+	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+	// write as much data from B as possible, but this is suboptimal because A
+	// is a higher-priority stream. With throttling enabled, we write a small
+	// amount of data from B to minimize the amount of bandwidth that B can
+	// steal from A.
+	ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
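+//
+// A minimal wiring sketch (assuming the Server.NewWriteScheduler hook
+// defined in this package's server code):
+//
+//	srv := &Server{NewWriteScheduler: func() WriteScheduler {
+//		return NewPriorityWriteScheduler(nil)
+//	}}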
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteScheduler{
+		nodes:                make(map[uint32]*priorityNode),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
+
+type priorityNodeState int
+
+const (
+	priorityNodeOpen priorityNodeState = iota
+	priorityNodeClosed
+	priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+	q            writeQueue        // queue of pending frames to write
+	id           uint32            // id of the stream, or 0 for the root of the tree
+	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeState // open | closed | idle
+	bytes        int64             // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNode
+	kids       *priorityNode // start of the kids list
+	prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNode) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpen)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
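+	// If every sibling carries the same weight there is nothing to
+	// sort, so walk the kids in list order.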
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
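+			// A DATA frame for an unknown stream, by contrast, would be a
+			// bug in the caller, so fail loudly below.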
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..36d7919f1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
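+	// Go's randomized map iteration order is what makes this scheduler
+	// "random" rather than round-robin.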
+ for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 000000000..3daa8979e --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". +func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000..92e733f6a --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
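+// The input is a label with the ACE prefix already stripped; for example,
+// decode("bcher-kva") returns "bücher".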
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
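+// It divides the first delta by damp=700 (later ones by 2), then derives
+// the new bias by shrinking delta while growing k in base-sized steps.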
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+	if firstTime {
+		delta /= damp
+	} else {
+		delta /= 2
+	}
+	delta += delta / numPoints
+	k := int32(0)
+	for delta > ((base-tmin)*tmax)/2 {
+		delta /= base - tmin
+		k += base
+	}
+	return k + (base-tmin+1)*delta/(delta+skew)
+}
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 000000000..20f2b8940
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,351 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+	"net"
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+	'!':  true,
+	'#':  true,
+	'$':  true,
+	'%':  true,
+	'&':  true,
+	'\'': true,
+	'*':  true,
+	'+':  true,
+	'-':  true,
+	'.':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'W':  true,
+	'V':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'^':  true,
+	'_':  true,
+	'`':  true,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'|':  true,
+	'~':  true,
+}
+
+func IsTokenRune(r rune) bool {
+	i := int(r)
+	return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+	return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+	for _, v := range values {
+		if headerValueContainsToken(v, token) {
+			return true
+		}
+	}
+	return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+	// TODO: consider using strings.Trim(x, " \t") instead,
+	// if and when it's fast enough. See issue 10292.
+	// But this ASCII-only code will probably always beat UTF-8
+	// aware code.
+	for len(x) > 0 && isOWS(x[0]) {
+		x = x[1:]
+	}
+	for len(x) > 0 && isOWS(x[len(x)-1]) {
+		x = x[:len(x)-1]
+	}
+	return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
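+// For example (illustrative): headerValueContainsToken("gzip, Chunked", "chunked")
+// is true, since each comma-separated element is trimmed of optional
+// whitespace and the tokens are compared ASCII case-insensitively.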
+func headerValueContainsToken(v string, token string) bool {
+	v = trimOWS(v)
+	if comma := strings.IndexByte(v, ','); comma != -1 {
+		return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+	}
+	return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+	if 'A' <= b && b <= 'Z' {
+		return b + ('a' - 'A')
+	}
+	return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+	if len(t1) != len(t2) {
+		return false
+	}
+	for i, b := range t1 {
+		if b >= utf8.RuneSelf {
+			// No UTF-8 or non-ASCII allowed in tokens.
+			return false
+		}
+		if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+	const del = 0x7f // a CTL
+	return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+//	header-field = field-name ":" OWS field-value OWS
+//	field-name   = token
+//	token        = 1*tchar
+//	tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+//		"^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+	if len(v) == 0 {
+		return false
+	}
+	for _, r := range v {
+		if !IsTokenRune(r) {
+			return false
+		}
+	}
+	return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+	// The latest spec is actually this:
+	//
+	// http://tools.ietf.org/html/rfc7230#section-5.4
+	//     Host = uri-host [ ":" port ]
+	//
+	// Where uri-host is:
+	//     http://tools.ietf.org/html/rfc3986#section-3.2.2
+	//
+	// But we're going to be much more lenient for now and just
+	// search for any byte that's not a valid byte in any of those
+	// expressions.
+	for i := 0; i < len(h); i++ {
+		if !validHostByte[h[i]] {
+			return false
+		}
+	}
+	return true
+}
+
+// See the validHostHeader comment.
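+// For example (illustrative): every byte of "example.com:8080" and of
+// "[fe80::1%25en0]" is in this table, while ' ' and '/' are not.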
+var validHostByte = [256]bool{
+	'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+	'8': true, '9': true,
+
+	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+	'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+	'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+	'y': true, 'z': true,
+
+	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+	'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+	'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+	'Y': true, 'Z': true,
+
+	'!':  true, // sub-delims
+	'$':  true, // sub-delims
+	'%':  true, // pct-encoded (and used in IPv6 zones)
+	'&':  true, // sub-delims
+	'(':  true, // sub-delims
+	')':  true, // sub-delims
+	'*':  true, // sub-delims
+	'+':  true, // sub-delims
+	',':  true, // sub-delims
+	'-':  true, // unreserved
+	'.':  true, // unreserved
+	':':  true, // IPv6address + Host expression's optional port
+	';':  true, // sub-delims
+	'=':  true, // sub-delims
+	'[':  true,
+	'\'': true, // sub-delims
+	']':  true,
+	'_':  true, // unreserved
+	'~':  true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+//	message-header = field-name ":" [ field-value ]
+//	field-value    = *( field-content | LWS )
+//	field-content  = <the OCTETs making up the field-value
+//	                 and consisting of either *TEXT or combinations
+//	                 of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+//	TEXT           = <any OCTET except CTLs,
+//	                  but including LWS>
+//	LWS            = [CRLF] 1*( SP | HT )
+//	CTL            = <any US-ASCII control character
+//	                  (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+//	field-value    = *( field-content / obs-fold )
+//	obs-fold       =  N/A to http2, and deprecated
+//	field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+//	field-vchar    = VCHAR / obs-text
+//	obs-text       = %x80-FF
+//	VCHAR          = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+	for i := 0; i < len(v); i++ {
+		b := v[i]
+		if isCTL(b) && !isLWS(b) {
+			return false
+		}
+	}
+	return true
+}
+
+func isASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+	if isASCII(v) {
+		return v, nil
+	}
+
+	host, port, err := net.SplitHostPort(v)
+	if err != nil {
+		// The input 'v' argument was just a "host" argument,
+		// without a port. This error should not be returned
+		// to the caller.
+		host = v
+		port = ""
+	}
+	host, err = idna.ToASCII(host)
+	if err != nil {
+		// Non-UTF-8? Not representable in Punycode, in any
+		// case.
+ return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 000000000..69a4ac7ee --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 000000000..2dab943a4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 000000000..8cffdd16c --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. + hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) 
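+		// Masked write path: emit the header (which already carries the
+		// masking key), then XOR each payload byte with the key byte at
+		// index i%4 before writing it.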
+ frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. + if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. 
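+// The key is four random bytes; hybiFrameReader.Read and
+// hybiFrameWriter.Write above XOR payload byte i with maskingKey[i%4].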
+func generateMaskingKey() (maskingKey []byte, err error) {
+	maskingKey = make([]byte, 4)
+	if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+		return
+	}
+	return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+	key := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		panic(err)
+	}
+	nonce = make([]byte, 24)
+	base64.StdEncoding.Encode(nonce, key)
+	return
+}
+
+// removeZone removes the IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+	if !strings.HasPrefix(host, "[") {
+		return host
+	}
+	i := strings.LastIndex(host, "]")
+	if i < 0 {
+		return host
+	}
+	j := strings.LastIndex(host[:i], "%")
+	if j < 0 {
+		return host
+	}
+	return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+	h := sha1.New()
+	if _, err = h.Write(nonce); err != nil {
+		return
+	}
+	if _, err = h.Write([]byte(websocketGUID)); err != nil {
+		return
+	}
+	expected = make([]byte, 28)
+	base64.StdEncoding.Encode(expected, h.Sum(nil))
+	return
+}
+
+// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+	bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+	// According to RFC 6874, an HTTP client, proxy, or other
+	// intermediary must remove any IPv6 zone identifier attached
+	// to an outgoing URI.
+	bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+	bw.WriteString("Upgrade: websocket\r\n")
+	bw.WriteString("Connection: Upgrade\r\n")
+	nonce := generateNonce()
+	if config.handshakeData != nil {
+		nonce = []byte(config.handshakeData["key"])
+	}
+	bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+	bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+	if config.Version != ProtocolVersionHybi13 {
+		return ErrBadProtocolVersion
+	}
+
+	bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+	if len(config.Protocol) > 0 {
+		bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+	}
+	// TODO(ukai): send Sec-WebSocket-Extensions.
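+	// Note: entries in config.Header whose canonical names appear in
+	// handshakeHeader are excluded by WriteSubset below, so callers
+	// cannot override the handshake fields written above.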
+	err = config.Header.WriteSubset(bw, handshakeHeader)
+	if err != nil {
+		return err
+	}
+
+	bw.WriteString("\r\n")
+	if err = bw.Flush(); err != nil {
+		return err
+	}
+
+	resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 101 {
+		return ErrBadStatus
+	}
+	if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+		strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+		return ErrBadUpgrade
+	}
+	expectedAccept, err := getNonceAccept(nonce)
+	if err != nil {
+		return err
+	}
+	if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+		return ErrChallengeResponse
+	}
+	if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+		return ErrUnsupportedExtensions
+	}
+	offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+	if offeredProtocol != "" {
+		protocolMatched := false
+		for i := 0; i < len(config.Protocol); i++ {
+			if config.Protocol[i] == offeredProtocol {
+				protocolMatched = true
+				break
+			}
+		}
+		if !protocolMatched {
+			return ErrBadWebSocketProtocol
+		}
+		config.Protocol = []string{offeredProtocol}
+	}
+
+	return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+	return newHybiConn(config, buf, rwc, nil)
+}
+
+// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
+type hybiServerHandshaker struct {
+	*Config
+	accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+	c.Version = ProtocolVersionHybi13
+	if req.Method != "GET" {
+		return http.StatusMethodNotAllowed, ErrBadRequestMethod
+	}
+	// HTTP version can be safely ignored.
+
+	if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+		!strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+		return http.StatusBadRequest, ErrNotWebSocket
+	}
+
+	key := req.Header.Get("Sec-Websocket-Key")
+	if key == "" {
+		return http.StatusBadRequest, ErrChallengeResponse
+	}
+	version := req.Header.Get("Sec-Websocket-Version")
+	switch version {
+	case "13":
+		c.Version = ProtocolVersionHybi13
+	default:
+		return http.StatusBadRequest, ErrBadWebSocketVersion
+	}
+	var scheme string
+	if req.TLS != nil {
+		scheme = "wss"
+	} else {
+		scheme = "ws"
+	}
+	c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+	if err != nil {
+		return http.StatusBadRequest, err
+	}
+	protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+	if protocol != "" {
+		protocols := strings.Split(protocol, ",")
+		for i := 0; i < len(protocols); i++ {
+			c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+		}
+	}
+	c.accept, err = getNonceAccept([]byte(key))
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+	var origin string
+	switch config.Version {
+	case ProtocolVersionHybi13:
+		origin = req.Header.Get("Origin")
+	}
+	if origin == "" {
+		return nil, nil
+	}
+	return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+	if len(c.Protocol) > 0 {
+		if len(c.Protocol) != 1 {
+			// You need to choose a Protocol in the Handshake func in Server.
+ return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 000000000..0895dea19 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. + // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. 
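+	// It is an embedded websocket.Handler, i.e. a func(*Conn), invoked
+	// once the opening handshake has completed.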
+ Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 000000000..e242c89a7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. 
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. 
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. 
+func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 000000000..9a4f8d59e --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
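+//
+// A typical call looks like (illustrative; fetch stands for any
+// func() (interface{}, error) supplied by the caller):
+//
+//	v, err, shared := g.Do("key", fetch)
+//
+// Concurrent callers passing the same key all receive fetch's result.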
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 000000000..b5ee9a945 --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,73 @@ +# github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 +github.com/armon/go-proxyproto +# github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 +github.com/circonus-labs/circonus-gometrics +github.com/circonus-labs/circonus-gometrics/api +github.com/circonus-labs/circonus-gometrics/checkmgr +# github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 +github.com/circonus-labs/circonusllhist +# github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 +github.com/cyberdelia/go-metrics-graphite +# github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10 +github.com/fatih/structs +# github.com/gobwas/glob v0.0.0-20180208211842-19c076cdf202 +github.com/gobwas/glob +github.com/gobwas/glob/compiler +github.com/gobwas/glob/syntax +github.com/gobwas/glob/match +github.com/gobwas/glob/syntax/ast +github.com/gobwas/glob/util/runes +github.com/gobwas/glob/syntax/lexer +github.com/gobwas/glob/util/strings +# github.com/hashicorp/consul v0.8.0 +github.com/hashicorp/consul/api +# github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce +github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.0.0-20160407174126-ad28ea4487f0 +github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-multierror v0.0.0-20150916205742-d30f09973e19 +github.com/hashicorp/go-multierror +# github.com/hashicorp/go-retryablehttp v0.0.0-20160810172255-f4ed9b0fa01a +github.com/hashicorp/go-retryablehttp +# github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 +github.com/hashicorp/go-rootcerts +# github.com/hashicorp/hcl v0.0.0-20160607001940-d7400db7143f +github.com/hashicorp/hcl +github.com/hashicorp/hcl/hcl/ast +github.com/hashicorp/hcl/hcl/parser +github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/hcl/scanner +github.com/hashicorp/hcl/hcl/strconv 
+github.com/hashicorp/hcl/json/scanner +github.com/hashicorp/hcl/json/token +# github.com/hashicorp/serf v0.7.0 +github.com/hashicorp/serf/coordinate +# github.com/hashicorp/vault v0.6.0 +github.com/hashicorp/vault/api +# github.com/magiconair/properties v0.0.0-20171031211101-49d762b9817b +github.com/magiconair/properties +# github.com/mitchellh/go-homedir v0.0.0-20160606030122-1111e456ffea +github.com/mitchellh/go-homedir +# github.com/mitchellh/mapstructure v0.0.0-20160212031839-d2dd02622084 +github.com/mitchellh/mapstructure +# github.com/pascaldekloe/goe v0.0.0-20150719195608-f99183613f48 +github.com/pascaldekloe/goe/verify +# github.com/pkg/profile v1.2.1 +github.com/pkg/profile +# github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b +github.com/pubnub/go-metrics-statsd +# github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 +github.com/rcrowley/go-metrics +# github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af +github.com/rogpeppe/fastuuid +# github.com/sergi/go-diff v0.0.0-20170118131230-24e2351369ec +github.com/sergi/go-diff/diffmatchpatch +# golang.org/x/net v0.0.0-20170114055629-f2499483f923 +golang.org/x/net/websocket +golang.org/x/net/http2 +golang.org/x/net/http2/hpack +golang.org/x/net/idna +golang.org/x/net/lex/httplex +# golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654 +golang.org/x/sync/singleflight From 58c9ff2d0f2a4492fa6363e43513cb8eca6125bc Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 22:03:03 +0200 Subject: [PATCH 12/44] remove references to govendor and vendorfmt --- Makefile | 20 ++------------------ docs/content/contrib/guidelines.md | 9 ++------- 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index 695439625..e53af36e2 100644 --- a/Makefile +++ b/Makefile @@ -20,12 +20,6 @@ GOVERSION = $(shell go version | awk '{print $$3;}') # GORELEASER is the path to the goreleaser binary. GORELEASER = $(shell which goreleaser) -# GOVENDOR is the path to the govendor binary. -GOVENDOR = $(shell which govendor) - -# VENDORFMT is the path to the vendorfmt binary. -VENDORFMT = $(shell which vendorfmt) - # pin versions for CI builds CI_CONSUL_VERSION=1.0.6 CI_VAULT_VERSION=0.9.6 @@ -46,23 +40,13 @@ help: @echo "clean - remove temp files" # build compiles fabio and the test dependencies -build: checkdeps vendorfmt gofmt +build: gofmt go build # test runs the tests test: build go test -v -test.timeout 15s `go list ./... | grep -v '/vendor/'` -# checkdeps ensures that all required dependencies are vendored in -checkdeps: - [ -x "$(GOVENDOR)" ] || go get -u github.com/kardianos/govendor - govendor list +e | grep '^ e ' && { echo "Found missing packages. Please run 'govendor add +e'"; exit 1; } || : echo - -# vendorfmt ensures that the vendor/vendor.json file is formatted correctly -vendorfmt: - [ -x "$(VENDORFMT)" ] || go get -u github.com/magiconair/vendorfmt/cmd/vendorfmt - vendorfmt - # gofmt runs gofmt on the code gofmt: gofmt -s -w `find . -type f -name '*.go' | grep -v vendor` @@ -156,4 +140,4 @@ clean: rm -rf pkg dist fabio find . 
-name '*.test' -delete -.PHONY: all build checkdeps clean codeship gofmt gorelease help homebrew install linux pkg preflight release tag test vendorfmt +.PHONY: all build clean codeship gofmt gorelease help homebrew install linux pkg preflight release tag test diff --git a/docs/content/contrib/guidelines.md b/docs/content/contrib/guidelines.md index 824f49413..af96ea4f0 100644 --- a/docs/content/contrib/guidelines.md +++ b/docs/content/contrib/guidelines.md @@ -16,13 +16,8 @@ we ask for the following: already underway. * Only add libraries if they provide significant value. Consider copying the code (attribution) or writing it yourself. -* Manage dependencies in the vendor path via `govendor` as separate commits per library - and format the `vendor.json` file with `vendorfmt`. Please make sure your commit - message has the following format: - -``` -Vendoring in version of -``` +* Manage dependencies with `go mod` and run `go mod vendor` afterwards to + sync the `vendor` folder for backwards compatibility. Once you are ready to send in a pull request, be sure to: From edca2134c862b9ea55dcf1a4532f59baceb974e6 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 16 Aug 2018 22:39:00 +0200 Subject: [PATCH 13/44] add missing dependency --- .gitignore | 2 +- vendor/github.com/pkg/profile/.travis.yml | 11 + vendor/github.com/pkg/profile/AUTHORS | 1 + vendor/github.com/pkg/profile/LICENSE | 24 +++ vendor/github.com/pkg/profile/README.md | 54 +++++ vendor/github.com/pkg/profile/mutex.go | 13 ++ vendor/github.com/pkg/profile/mutex17.go | 9 + vendor/github.com/pkg/profile/profile.go | 244 ++++++++++++++++++++++ vendor/github.com/pkg/profile/trace.go | 8 + vendor/github.com/pkg/profile/trace16.go | 10 + 10 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/pkg/profile/.travis.yml create mode 100644 vendor/github.com/pkg/profile/AUTHORS create mode 100644 vendor/github.com/pkg/profile/LICENSE create mode 100644 vendor/github.com/pkg/profile/README.md create mode 100644 vendor/github.com/pkg/profile/mutex.go create mode 100644 vendor/github.com/pkg/profile/mutex17.go create mode 100644 vendor/github.com/pkg/profile/profile.go create mode 100644 vendor/github.com/pkg/profile/trace.go create mode 100644 vendor/github.com/pkg/profile/trace16.go diff --git a/.gitignore b/.gitignore index 0860c372a..b214b1379 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,5 @@ fabio fabio.exe fabio.sublime-* demo/cert/ -pkg/ +/pkg/ dist/ diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml new file mode 100644 index 000000000..192c5c27f --- /dev/null +++ b/vendor/github.com/pkg/profile/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/profile +go: + - 1.4.3 + - 1.5.2 + - 1.6.3 + - tip + +script: + - go test github.com/pkg/profile + - go test -race github.com/pkg/profile diff --git a/vendor/github.com/pkg/profile/AUTHORS b/vendor/github.com/pkg/profile/AUTHORS new file mode 100644 index 000000000..00441d354 --- /dev/null +++ b/vendor/github.com/pkg/profile/AUTHORS @@ -0,0 +1 @@ +Dave Cheney diff --git a/vendor/github.com/pkg/profile/LICENSE b/vendor/github.com/pkg/profile/LICENSE new file mode 100644 index 000000000..f747a8411 --- /dev/null +++ b/vendor/github.com/pkg/profile/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013 Dave Cheney. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/profile/README.md b/vendor/github.com/pkg/profile/README.md new file mode 100644 index 000000000..37bfa58c5 --- /dev/null +++ b/vendor/github.com/pkg/profile/README.md @@ -0,0 +1,54 @@ +profile +======= + +Simple profiling support package for Go + +[![Build Status](https://travis-ci.org/pkg/profile.svg?branch=master)](https://travis-ci.org/pkg/profile) [![GoDoc](http://godoc.org/github.com/pkg/profile?status.svg)](http://godoc.org/github.com/pkg/profile) + + +installation +------------ + + go get github.com/pkg/profile + +usage +----- + +Enabling profiling in your application is as simple as one line at the top of your main function + +```go +import "github.com/pkg/profile" + +func main() { + defer profile.Start().Stop() + ... +} +``` + +options +------- + +What to profile is controlled by config value passed to profile.Start. +By default CPU profiling is enabled. + +```go +import "github.com/pkg/profile" + +func main() { + // p.Stop() must be called before the program exits to + // ensure profiling information is written to disk. + p := profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.NoShutdownHook) + ... +} +``` + +Several convenience package level values are provided for cpu, memory, and block (contention) profiling. + +For more complex options, consult the [documentation](http://godoc.org/github.com/pkg/profile). + +contributing +------------ + +We welcome pull requests, bug fixes and issue reports. + +Before proposing a change, please discuss it first by raising an issue. diff --git a/vendor/github.com/pkg/profile/mutex.go b/vendor/github.com/pkg/profile/mutex.go new file mode 100644 index 000000000..e69c5b44d --- /dev/null +++ b/vendor/github.com/pkg/profile/mutex.go @@ -0,0 +1,13 @@ +// +build go1.8 + +package profile + +import "runtime" + +func enableMutexProfile() { + runtime.SetMutexProfileFraction(1) +} + +func disableMutexProfile() { + runtime.SetMutexProfileFraction(0) +} diff --git a/vendor/github.com/pkg/profile/mutex17.go b/vendor/github.com/pkg/profile/mutex17.go new file mode 100644 index 000000000..b004c21d5 --- /dev/null +++ b/vendor/github.com/pkg/profile/mutex17.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package profile + +// mock mutex support for Go 1.7 and earlier. 
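+// (Editor's note: mutex.go above carries "+build go1.8" while this file
+// carries "+build !go1.8", so exactly one implementation of these two
+// functions is compiled in and callers need no build tags of their own.)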
+ +func enableMutexProfile() {} + +func disableMutexProfile() {} diff --git a/vendor/github.com/pkg/profile/profile.go b/vendor/github.com/pkg/profile/profile.go new file mode 100644 index 000000000..c44913a4c --- /dev/null +++ b/vendor/github.com/pkg/profile/profile.go @@ -0,0 +1,244 @@ +// Package profile provides a simple way to manage runtime/pprof +// profiling of your Go application. +package profile + +import ( + "io/ioutil" + "log" + "os" + "os/signal" + "path/filepath" + "runtime" + "runtime/pprof" + "sync/atomic" +) + +const ( + cpuMode = iota + memMode + mutexMode + blockMode + traceMode +) + +// Profile represents an active profiling session. +type Profile struct { + // quiet suppresses informational messages during profiling. + quiet bool + + // noShutdownHook controls whether the profiling package should + // hook SIGINT to write profiles cleanly. + noShutdownHook bool + + // mode holds the type of profiling that will be made + mode int + + // path holds the base path where various profiling files are written. + // If blank, the base path will be generated by ioutil.TempDir. + path string + + // memProfileRate holds the rate for the memory profile. + memProfileRate int + + // closer holds a cleanup function that run after each profile + closer func() + + // stopped records if a call to profile.Stop has been made + stopped uint32 +} + +// NoShutdownHook controls whether the profiling package should +// hook SIGINT to write profiles cleanly. +// Programs with more sophisticated signal handling should set +// this to true and ensure the Stop() function returned from Start() +// is called during shutdown. +func NoShutdownHook(p *Profile) { p.noShutdownHook = true } + +// Quiet suppresses informational messages during profiling. +func Quiet(p *Profile) { p.quiet = true } + +// CPUProfile enables cpu profiling. +// It disables any previous profiling settings. +func CPUProfile(p *Profile) { p.mode = cpuMode } + +// DefaultMemProfileRate is the default memory profiling rate. +// See also http://golang.org/pkg/runtime/#pkg-variables +const DefaultMemProfileRate = 4096 + +// MemProfile enables memory profiling. +// It disables any previous profiling settings. +func MemProfile(p *Profile) { + p.memProfileRate = DefaultMemProfileRate + p.mode = memMode +} + +// MemProfileRate enables memory profiling at the preferred rate. +// It disables any previous profiling settings. +func MemProfileRate(rate int) func(*Profile) { + return func(p *Profile) { + p.memProfileRate = rate + p.mode = memMode + } +} + +// MutexProfile enables mutex profiling. +// It disables any previous profiling settings. +// +// Mutex profiling is a no-op before go1.8. +func MutexProfile(p *Profile) { p.mode = mutexMode } + +// BlockProfile enables block (contention) profiling. +// It disables any previous profiling settings. +func BlockProfile(p *Profile) { p.mode = blockMode } + +// Trace profile controls if execution tracing will be enabled. It disables any previous profiling settings. +func TraceProfile(p *Profile) { p.mode = traceMode } + +// ProfilePath controls the base path where various profiling +// files are written. If blank, the base path will be generated +// by ioutil.TempDir. +func ProfilePath(path string) func(*Profile) { + return func(p *Profile) { + p.path = path + } +} + +// Stop stops the profile and flushes any unwritten data. 
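+// (Editor's note: Stop is safe to call more than once; the atomic
+// compare-and-swap on p.stopped below makes any call after the first a no-op.)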
+func (p *Profile) Stop() { + if !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) { + // someone has already called close + return + } + p.closer() + atomic.StoreUint32(&started, 0) +} + +// started is non zero if a profile is running. +var started uint32 + +// Start starts a new profiling session. +// The caller should call the Stop method on the value returned +// to cleanly stop profiling. +func Start(options ...func(*Profile)) interface { + Stop() +} { + if !atomic.CompareAndSwapUint32(&started, 0, 1) { + log.Fatal("profile: Start() already called") + } + + var prof Profile + for _, option := range options { + option(&prof) + } + + path, err := func() (string, error) { + if p := prof.path; p != "" { + return p, os.MkdirAll(p, 0777) + } + return ioutil.TempDir("", "profile") + }() + + if err != nil { + log.Fatalf("profile: could not create initial output directory: %v", err) + } + + logf := func(format string, args ...interface{}) { + if !prof.quiet { + log.Printf(format, args...) + } + } + + switch prof.mode { + case cpuMode: + fn := filepath.Join(path, "cpu.pprof") + f, err := os.Create(fn) + if err != nil { + log.Fatalf("profile: could not create cpu profile %q: %v", fn, err) + } + logf("profile: cpu profiling enabled, %s", fn) + pprof.StartCPUProfile(f) + prof.closer = func() { + pprof.StopCPUProfile() + f.Close() + logf("profile: cpu profiling disabled, %s", fn) + } + + case memMode: + fn := filepath.Join(path, "mem.pprof") + f, err := os.Create(fn) + if err != nil { + log.Fatalf("profile: could not create memory profile %q: %v", fn, err) + } + old := runtime.MemProfileRate + runtime.MemProfileRate = prof.memProfileRate + logf("profile: memory profiling enabled (rate %d), %s", runtime.MemProfileRate, fn) + prof.closer = func() { + pprof.Lookup("heap").WriteTo(f, 0) + f.Close() + runtime.MemProfileRate = old + logf("profile: memory profiling disabled, %s", fn) + } + + case mutexMode: + fn := filepath.Join(path, "mutex.pprof") + f, err := os.Create(fn) + if err != nil { + log.Fatalf("profile: could not create mutex profile %q: %v", fn, err) + } + enableMutexProfile() + logf("profile: mutex profiling enabled, %s", fn) + prof.closer = func() { + if mp := pprof.Lookup("mutex"); mp != nil { + mp.WriteTo(f, 0) + } + f.Close() + disableMutexProfile() + logf("profile: mutex profiling disabled, %s", fn) + } + + case blockMode: + fn := filepath.Join(path, "block.pprof") + f, err := os.Create(fn) + if err != nil { + log.Fatalf("profile: could not create block profile %q: %v", fn, err) + } + runtime.SetBlockProfileRate(1) + logf("profile: block profiling enabled, %s", fn) + prof.closer = func() { + pprof.Lookup("block").WriteTo(f, 0) + f.Close() + runtime.SetBlockProfileRate(0) + logf("profile: block profiling disabled, %s", fn) + } + + case traceMode: + fn := filepath.Join(path, "trace.out") + f, err := os.Create(fn) + if err != nil { + log.Fatalf("profile: could not create trace output file %q: %v", fn, err) + } + if err := startTrace(f); err != nil { + log.Fatalf("profile: could not start trace: %v", err) + } + logf("profile: trace enabled, %s", fn) + prof.closer = func() { + stopTrace() + logf("profile: trace disabled, %s", fn) + } + } + + if !prof.noShutdownHook { + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + + log.Println("profile: caught interrupt, stopping profiles") + prof.Stop() + + os.Exit(0) + }() + } + + return &prof +} diff --git a/vendor/github.com/pkg/profile/trace.go b/vendor/github.com/pkg/profile/trace.go new file mode 100644 index 
000000000..b349ed8b2 --- /dev/null +++ b/vendor/github.com/pkg/profile/trace.go @@ -0,0 +1,8 @@ +// +build go1.7 + +package profile + +import "runtime/trace" + +var startTrace = trace.Start +var stopTrace = trace.Stop diff --git a/vendor/github.com/pkg/profile/trace16.go b/vendor/github.com/pkg/profile/trace16.go new file mode 100644 index 000000000..6aa6566ef --- /dev/null +++ b/vendor/github.com/pkg/profile/trace16.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package profile + +import "io" + +// mock trace support for Go 1.6 and earlier. + +func startTrace(w io.Writer) error { return nil } +func stopTrace() {} From bc76b9e969850188365910b3a6de3ae19991d9ef Mon Sep 17 00:00:00 2001 From: Richard Kettelerij Date: Thu, 23 Aug 2018 10:01:38 +0200 Subject: [PATCH 14/44] Delete duplicate page This page is a dupe of https://github.com/fabiolb/fabio/blob/master/docs/content/feature/http-path-stripping.md --- docs/content/feature/path-stripping.md | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 docs/content/feature/path-stripping.md diff --git a/docs/content/feature/path-stripping.md b/docs/content/feature/path-stripping.md deleted file mode 100644 index 116563471..000000000 --- a/docs/content/feature/path-stripping.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Path Stripping" -since: "1.3.7" ---- - -fabio supports stripping a path from the incoming request. If you want to -forward `http://host/foo/bar` as `http://host/bar` you can add a `strip=/foo` -option to the route options as `urlprefix-/foo/bar strip=/foo`. - From f3f655264d997bf8a9178744818c06a93bfefd1d Mon Sep 17 00:00:00 2001 From: Shantanu Gadgil Date: Wed, 12 Sep 2018 00:17:40 +0530 Subject: [PATCH 15/44] compare host using lowercase (#543) use lowercase comparison for host lookup - fixes #542 --- route/table.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/route/table.go b/route/table.go index bb689dcff..1646e741d 100644 --- a/route/table.go +++ b/route/table.go @@ -136,6 +136,7 @@ func NewTable(s string) (t Table, err error) { // addRoute adds a new route prefix -> target for the given service. func (t Table) addRoute(d *RouteDef) error { host, path := hostpath(d.Src) + host = strings.ToLower(host) // maintain compatibility with parseURLPrefixTag if d.Src == "" { return errInvalidPrefix @@ -383,6 +384,7 @@ func (t Table) LookupHost(host string, pick picker) *Target { } func (t Table) lookup(host, path, trace string, pick picker, match matcher) *Target { + host = strings.ToLower(host) // routes are always added lowercase for _, r := range t[host] { if match(path, r) { n := len(r.Targets) From fe7f77897c2a88e59df42422e9d0da91623aa439 Mon Sep 17 00:00:00 2001 From: Aaron Hurt Date: Tue, 11 Sep 2018 14:07:10 -0500 Subject: [PATCH 16/44] update changelog --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75310e3f9..698e34da0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,16 @@ #### Improvements + * [Issue #542](https://github.com/fabiolb/fabio/issues/542): urlprefix allow to ignore case + + Fabio was forcing hostnames in routes added via Consul tags to lowercase. This caused problems + with table lookups that were not case-insensitive. The patch appled in #543 forces all routes added + via consul tags or the internal `addRoute` to have lower case hostnames in addition to forcing + hostnames to lowercase before performing table lookups. This means that routes and table lookups + in fabio are no longer case sensitive. 
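(Editor's aside: a minimal, self-contained sketch of the case folding this entry describes. The map and helpers below are illustrative stand-ins, not fabio's actual route table API; the point is that folding the host with strings.ToLower both when a route is added and again before lookup is what makes host matching case-insensitive.)

```go
package main

import (
	"fmt"
	"strings"
)

// routes maps a lower-cased host to its targets.
var routes = map[string][]string{}

// addRoute normalizes the host on insert, mirroring the addRoute side of the patch.
func addRoute(host, target string) {
	host = strings.ToLower(host)
	routes[host] = append(routes[host], target)
}

// lookup normalizes the host again, mirroring the lookup side of the patch.
func lookup(host string) []string {
	return routes[strings.ToLower(host)]
}

func main() {
	addRoute("MyApp.Example.com", "http://10.0.0.1:8080")
	fmt.Println(lookup("myapp.example.COM")) // [http://10.0.0.1:8080]
}
```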
+ + Thanks to [@shantanugadgil](https://github.com/shantanugadgil) for the patch. + #### Features ### [v1.5.9](https://github.com/fabiolb/fabio/releases/tag/v1.5.9) - 16 May 2018 From c730093c5479ff569a550470e0092ede8c5f7a0c Mon Sep 17 00:00:00 2001 From: Aaron Hurt Date: Tue, 11 Sep 2018 14:09:49 -0500 Subject: [PATCH 17/44] update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 698e34da0..aa1d5cdaf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ #### Improvements - * [Issue #542](https://github.com/fabiolb/fabio/issues/542): urlprefix allow to ignore case + * [Issue #542](https://github.com/fabiolb/fabio/issues/542): Ignore host case when adding and matching routes Fabio was forcing hostnames in routes added via Consul tags to lowercase. This caused problems with table lookups that were not case-insensitive. The patch appled in #543 forces all routes added From dc67d5a40ebff0cd9117585136c975413c46427a Mon Sep 17 00:00:00 2001 From: holtwilkins <5665043+holtwilkins@users.noreply.github.com> Date: Wed, 12 Sep 2018 23:55:02 +1000 Subject: [PATCH 18/44] Add $host pseudo variable (#544) This adds support for $host, which behaves similarly to $path. The idea with this is to be able to create a global redirect of anything received on, say, http to the https equivalent of what you were trying to hit. Fixes #533 --- proxy/http_integration_test.go | 41 +++++++++++++++++++++++++- route/table.go | 1 + route/target.go | 3 ++ route/target_test.go | 54 ++++++++++++++++++++++++++++++++-- 4 files changed, 96 insertions(+), 3 deletions(-) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index 453d7fdb8..ddc6c4c4c 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -262,7 +262,46 @@ func TestProxyHost(t *testing.T) { }) } -func TestRedirect(t *testing.T) { +func TestHostRedirect(t *testing.T) { + routes := "route add https-redir *:80 https://$host$path opts \"redirect=301\"\n" + + tbl, _ := route.NewTable(routes) + + proxy := httptest.NewServer(&HTTPProxy{ + Transport: http.DefaultTransport, + Lookup: func(r *http.Request) *route.Target { + r.Host = "c.com" + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + }, + }) + defer proxy.Close() + + tests := []struct { + req string + wantCode int + wantLoc string + }{ + {req: "/baz", wantCode: 301, wantLoc: "https://c.com/baz"}, + } + + http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // do not follow redirects + return http.ErrUseLastResponse + } + + for _, tt := range tests { + resp, _ := mustGet(proxy.URL + tt.req) + if resp.StatusCode != tt.wantCode { + t.Errorf("got status code %d, want %d", resp.StatusCode, tt.wantCode) + } + gotLoc, _ := resp.Location() + if gotLoc.String() != tt.wantLoc { + t.Errorf("got location %s, want %s", gotLoc, tt.wantLoc) + } + } +} + +func TestPathRedirect(t *testing.T) { routes := "route add mock / http://a.com/$path opts \"redirect=301\"\n" routes += "route add mock /foo http://a.com/abc opts \"redirect=301\"\n" routes += "route add mock /bar http://b.com/$path opts \"redirect=302 strip=/bar\"\n" diff --git a/route/table.go b/route/table.go index 1646e741d..117543e77 100644 --- a/route/table.go +++ b/route/table.go @@ -360,6 +360,7 @@ func (t Table) Lookup(req *http.Request, trace string, pick picker, match matche for _, h := range hosts { if target = t.lookup(h, req.URL.Path, trace, pick, match); target != nil { if 
target.RedirectCode != 0 {
+				req.URL.Host = req.Host
 				target.BuildRedirectURL(req.URL) // build redirect url and cache in target
 				if target.RedirectURL.Scheme == req.Header.Get("X-Forwarded-Proto") &&
 					target.RedirectURL.Host == req.Host &&
diff --git a/route/target.go b/route/target.go
index db9e5244b..08a1e36dc 100644
--- a/route/target.go
+++ b/route/target.go
@@ -85,4 +85,7 @@ func (t *Target) BuildRedirectURL(requestURL *url.URL) {
 	if t.RedirectURL.Path == "" {
 		t.RedirectURL.Path = "/"
 	}
+	if strings.Contains(t.RedirectURL.Host, "$host") {
+		t.RedirectURL.Host = strings.Replace(t.RedirectURL.Host, "$host", requestURL.Host, 1)
+	}
 }
diff --git a/route/target_test.go b/route/target_test.go
index 21ad2bc42..0eed8a0bb 100644
--- a/route/target_test.go
+++ b/route/target_test.go
@@ -32,6 +32,16 @@ func TestTarget_BuildRedirectURL(t *testing.T) {
 			{req: "/?aaa=1", want: "http://bar.com/a/b/c?foo=bar"},
 		},
 	},
+	{ // simple http -> https redirect with static path
+		route: "route add redirect *:80/ https://$host/",
+		tests: []routeTest{
+			{req: "/", want: "https://foo.com/"},
+			{req: "/abc", want: "https://foo.com/"},
+			{req: "/a/b/c", want: "https://foo.com/"},
+			{req: "/?aaa=1", want: "https://foo.com/"},
+			{req: "/abc/?aaa=1", want: "https://foo.com/"},
+		},
+	},
 	{ // simple redirect to corresponding path
 		route: "route add svc / http://bar.com/$path",
 		tests: []routeTest{
@@ -42,7 +52,17 @@ func TestTarget_BuildRedirectURL(t *testing.T) {
 			{req: "/abc/?aaa=1", want: "http://bar.com/abc/?aaa=1"},
 		},
 	},
-	{ // same as above but without / before $path
+	{ // simple http -> https redirect to corresponding host & path
+		route: "route add redirect *:80/ https://$host/$path",
+		tests: []routeTest{
+			{req: "/", want: "https://foo.com/"},
+			{req: "/abc", want: "https://foo.com/abc"},
+			{req: "/a/b/c", want: "https://foo.com/a/b/c"},
+			{req: "/?aaa=1", want: "https://foo.com/?aaa=1"},
+			{req: "/abc/?aaa=1", want: "https://foo.com/abc/?aaa=1"},
+		},
+	},
+	{ // simple redirect to corresponding path without / before $path
 		route: "route add svc / http://bar.com$path",
 		tests: []routeTest{
 			{req: "/", want: "http://bar.com/"},
@@ -52,6 +72,16 @@ func TestTarget_BuildRedirectURL(t *testing.T) {
 			{req: "/abc/?aaa=1", want: "http://bar.com/abc/?aaa=1"},
 		},
 	},
+	{ // simple http -> https redirect to corresponding host & path without / before $path
+		route: "route add redirect *:80/ https://$host$path",
+		tests: []routeTest{
+			{req: "/", want: "https://foo.com/"},
+			{req: "/abc", want: "https://foo.com/abc"},
+			{req: "/a/b/c", want: "https://foo.com/a/b/c"},
+			{req: "/?aaa=1", want: "https://foo.com/?aaa=1"},
+			{req: "/abc/?aaa=1", want: "https://foo.com/abc/?aaa=1"},
+		},
+	},
 	{ // arbitrary subdir on target with $path at end
 		route: "route add svc / http://bar.com/bbb/$path",
 		tests: []routeTest{
@@ -62,7 +92,17 @@ func TestTarget_BuildRedirectURL(t *testing.T) {
 			{req: "/abc/?aaa=1", want: "http://bar.com/bbb/abc/?aaa=1"},
 		},
 	},
-	{ // same as above but without / before $path
+	{ // http -> https redir to corresponding host w/ arbitrary subdir on target with $path at end
+		route: "route add redirect *:80/ https://$host/bbb/$path",
+		tests: []routeTest{
+			{req: "/", want: "https://foo.com/bbb/"},
+			{req: "/abc", want: "https://foo.com/bbb/abc"},
+			{req: "/a/b/c", want: "https://foo.com/bbb/a/b/c"},
+			{req: "/?aaa=1", want: "https://foo.com/bbb/?aaa=1"},
+			{req: "/abc/?aaa=1", want: "https://foo.com/bbb/abc/?aaa=1"},
+		},
+	},
+	{ // arbitrary subdir on target with $path at end but without / before $path
 		route: "route add svc / http://bar.com/bbb$path",
 		tests: []routeTest{
 			{req: "/", want: "http://bar.com/bbb/"},
@@ -72,6 +112,16 @@ func TestTarget_BuildRedirectURL(t *testing.T) {
 			{req: "/abc/?aaa=1", want: "http://bar.com/bbb/abc/?aaa=1"},
 		},
 	},
+	{ // http -> https redir to corresponding host w/ arbitrary subdir on target with $path at end but without / before $path
+		route: "route add redirect *:80/ https://$host/bbb$path",
+		tests: []routeTest{
+			{req: "/", want: "https://foo.com/bbb/"},
+			{req: "/abc", want: "https://foo.com/bbb/abc"},
+			{req: "/a/b/c", want: "https://foo.com/bbb/a/b/c"},
+			{req: "/?aaa=1", want: "https://foo.com/bbb/?aaa=1"},
+			{req: "/abc/?aaa=1", want: "https://foo.com/bbb/abc/?aaa=1"},
+		},
+	},
 	{ // strip prefix
 		route: "route add svc /stripme http://bar.com/$path opts \"strip=/stripme\"",
 		tests: []routeTest{
From d16e94bf5ec435d1d6b212819244273bf06f5982 Mon Sep 17 00:00:00 2001
From: M S Vishwanath Bhat
Date: Wed, 12 Sep 2018 16:36:25 +0200
Subject: [PATCH 19/44] Correct the access control feature documentation page
 (#546)

---
 docs/content/feature/access-control.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/feature/access-control.md b/docs/content/feature/access-control.md
index 90a4a195e..5ffed3d84 100644
--- a/docs/content/feature/access-control.md
+++ b/docs/content/feature/access-control.md
@@ -40,8 +40,8 @@ The source ip used for validation against the defined ruleset is
 taken from information available in the request.

 For `HTTP` requests the client `RemoteAddr` is always validated
-followed by the first element of the `X-Forwarded-For` header, if
-present. When either of these elements match an `allow` the request
+followed by all elements of the `X-Forwarded-For` header, if
+present. When both of these elements match an `allow` the request
 will be allowed; similarly when either element matches a `deny` the
 request will be denied.

From c5087e06749a52e246e24f36575e16dbbafbf81f Mon Sep 17 00:00:00 2001
From: Aaron Hurt
Date: Wed, 12 Sep 2018 09:39:41 -0500
Subject: [PATCH 20/44] Clarifications to access-control documentation

---
 docs/content/feature/access-control.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/feature/access-control.md b/docs/content/feature/access-control.md
index 5ffed3d84..404ab48b1 100644
--- a/docs/content/feature/access-control.md
+++ b/docs/content/feature/access-control.md
@@ -41,8 +41,8 @@ taken from information available in the request.

 For `HTTP` requests the client `RemoteAddr` is always validated
 followed by all elements of the `X-Forwarded-For` header, if
-present. When both of these elements match an `allow` the request
-will be allowed; similarly when either element matches a `deny` the
+present. When all of these elements match an `allow` the request
+will be allowed; similarly when any element matches a `deny` the
 request will be denied.

 For `TCP` requests the source address of the network socket

From 6d900157197151aca85bcf5e0ad84606bdded345 Mon Sep 17 00:00:00 2001
From: Aaron Hurt
Date: Wed, 12 Sep 2018 13:03:05 -0500
Subject: [PATCH 21/44] update changelog

---
 CHANGELOG.md | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa1d5cdaf..f1f0ec504 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,15 +24,24 @@
 * [Issue #542](https://github.com/fabiolb/fabio/issues/542): Ignore host case when adding and matching routes

 Fabio was forcing hostnames in routes added via Consul tags to lowercase.
This caused problems - with table lookups that were not case-insensitive. The patch appled in #543 forces all routes added + with table lookups that were not case-insensitive. The patch applied in #543 forces all routes added via consul tags or the internal `addRoute` to have lower case hostnames in addition to forcing - hostnames to lowercase before performing table lookups. This means that routes and table lookups - in fabio are no longer case sensitive. + hostnames to lowercase before performing table lookups. This means that the host portion of routes and + host based table lookups in fabio are no longer case sensitive. Thanks to [@shantanugadgil](https://github.com/shantanugadgil) for the patch. #### Features + * [Issue #544](https://github.com/fabiolb/fabio/issues/544): Add $host pseudo variable + + This PR added support for `$host` pseudo variable that behaves similarly to the `$path` variable. + You should now be able to create a global redirect for requests received on any host to the same or different + request host on the same or different path when combined with the `$path` variable. This allows for a truly global + protocol redirect of HTTP -> HTTPS traffic irrespective of host and path. + + Thanks to [@holtwilkins](https://github.com/holtwilkins) for the patch. + ### [v1.5.9](https://github.com/fabiolb/fabio/releases/tag/v1.5.9) - 16 May 2018 #### Notes From 062a8ea6952a50dd3bf5cf734de37e669112bf6c Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Mon, 17 Sep 2018 13:29:16 -0500 Subject: [PATCH 22/44] Issue #548 added enable/disable glob matching --- config/config.go | 21 ++++++++++---------- config/default.go | 1 + config/load.go | 1 + fabio.properties | 11 +++++++++++ main.go | 2 +- proxy/http_integration_test.go | 29 ++++++++++++++++++++------- proxy/listen_test.go | 4 +++- proxy/ws_integration_test.go | 7 +++++-- route/issue57_test.go | 6 +++++- route/route_bench_test.go | 5 ++++- route/table.go | 36 ++++++++++++++++++++++++++-------- route/table_test.go | 9 +++++++-- 12 files changed, 99 insertions(+), 33 deletions(-) diff --git a/config/config.go b/config/config.go index f8ad7e4c2..8d0fee04c 100644 --- a/config/config.go +++ b/config/config.go @@ -7,16 +7,17 @@ import ( ) type Config struct { - Proxy Proxy - Registry Registry - Listen []Listen - Log Log - Metrics Metrics - UI UI - Runtime Runtime - ProfileMode string - ProfilePath string - Insecure bool + Proxy Proxy + Registry Registry + Listen []Listen + Log Log + Metrics Metrics + UI UI + Runtime Runtime + ProfileMode string + ProfilePath string + Insecure bool + GlobMatching bool } type CertSource struct { diff --git a/config/default.go b/config/default.go index 45f68424d..55fed35bb 100644 --- a/config/default.go +++ b/config/default.go @@ -77,4 +77,5 @@ var defaultConfig = &Config{ Color: "light-green", Access: "rw", }, + GlobMatching: true, } diff --git a/config/load.go b/config/load.go index d75188471..7992a446a 100644 --- a/config/load.go +++ b/config/load.go @@ -186,6 +186,7 @@ func load(cmdline, environ, envprefix []string, props *properties.Properties) (c f.StringVar(&cfg.UI.Title, "ui.title", defaultConfig.UI.Title, "optional title for the UI") f.StringVar(&cfg.ProfileMode, "profile.mode", defaultConfig.ProfileMode, "enable profiling mode, one of [cpu, mem, mutex, block]") f.StringVar(&cfg.ProfilePath, "profile.path", defaultConfig.ProfilePath, "path to profile dump file") + f.BoolVar(&cfg.GlobMatching, "glob.matching.enabled", defaultConfig.GlobMatching, "Enable/Disable Glob Matching on routes, one of [true, 
false]") // deprecated flags var proxyLogRoutes string diff --git a/fabio.properties b/fabio.properties index dae2caab0..2dc80dc85 100644 --- a/fabio.properties +++ b/fabio.properties @@ -743,6 +743,17 @@ # registry.consul.checksRequired = one +# glob.matching.enabled Enables glob matching on route lookups +# If glob matching is enabled there is a performance decrease +# for every route lookup. At a large number of services (> 500) this +# can have a significant impact on performance. If glob matching is disabled +# Fabio performs a static string compare for route lookups. +# +# The default is +# +# glob.matching.enabled = true + + # metrics.target configures the backend the metrics values are # sent to. # diff --git a/main.go b/main.go index 264c30978..769a68997 100644 --- a/main.go +++ b/main.go @@ -183,7 +183,7 @@ func newHTTPProxy(cfg *config.Config) http.Handler { Transport: newTransport(nil), InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}), Lookup: func(r *http.Request) *route.Target { - t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match) + t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, cfg) if t == nil { notFound.Inc(1) log.Print("[WARN] No route for ", r.Host, r.URL) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index ddc6c4c4c..e67280ba2 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -175,6 +175,8 @@ func TestProxyNoRouteStatus(t *testing.T) { } func TestProxyStripsPath(t *testing.T) { + //Glob Matching True + globMatching := config.Config{GlobMatching: true} server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.RequestURI { case "/bar": @@ -188,7 +190,7 @@ func TestProxyStripsPath(t *testing.T) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock /foo/bar " + server.URL + ` opts "strip=/foo"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -215,6 +217,8 @@ func TestProxyHost(t *testing.T) { routes += "route add mock /hostcustom http://b.com/ opts \"host=bar.com\"\n" routes += "route add mock / http://a.com/" tbl, _ := route.NewTable(routes) + //Glob Matching True + globMatching := config.Config{GlobMatching: true} proxy := httptest.NewServer(&HTTPProxy{ Transport: &http.Transport{ @@ -224,7 +228,7 @@ func TestProxyHost(t *testing.T) { }, }, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -266,12 +270,14 @@ func TestHostRedirect(t *testing.T) { routes := "route add https-redir *:80 https://$host$path opts \"redirect=301\"\n" tbl, _ := route.NewTable(routes) + //Glob Matching True + globMatching := config.Config{GlobMatching: true} proxy := httptest.NewServer(&HTTPProxy{ Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { r.Host = "c.com" - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -306,11 +312,13 @@ func TestPathRedirect(t *testing.T) { routes += "route add mock /foo http://a.com/abc opts \"redirect=301\"\n" routes += "route add mock /bar 
http://b.com/$path opts \"redirect=302 strip=/bar\"\n" tbl, _ := route.NewTable(routes) + //Glob Matching True + globMatching := config.Config{GlobMatching: true} proxy := httptest.NewServer(&HTTPProxy{ Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -472,13 +480,15 @@ func TestProxyHTTPSUpstream(t *testing.T) { server.TLS = tlsServerConfig() server.StartTLS() defer server.Close() + //Glob Matching True + globMatching := config.Config{GlobMatching: true} proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -497,6 +507,8 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { server.TLS = &tls.Config{} server.StartTLS() defer server.Close() + //Glob Matching True + globMatching := config.Config{GlobMatching: true} proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, @@ -506,7 +518,7 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { }, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https tlsskipverify=true"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer proxy.Close() @@ -699,6 +711,9 @@ func BenchmarkProxyLogger(b *testing.B) { b.Fatal("logger.NewHTTPLogger:", err) } + //Glob Matching True + globMatching := config.Config{GlobMatching: true} + proxy := &HTTPProxy{ Config: config.Proxy{ LocalIP: "1.1.1.1", @@ -707,7 +722,7 @@ func BenchmarkProxyLogger(b *testing.B) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock / " + server.URL) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, Logger: l, } diff --git a/proxy/listen_test.go b/proxy/listen_test.go index 607461641..ae307d27a 100644 --- a/proxy/listen_test.go +++ b/proxy/listen_test.go @@ -22,6 +22,8 @@ func TestGracefulShutdown(t *testing.T) { })) defer srv.Close() + //Glob Matching True + globMatching := config.Config{GlobMatching: true} // start proxy addr := "127.0.0.1:57777" var wg sync.WaitGroup @@ -32,7 +34,7 @@ func TestGracefulShutdown(t *testing.T) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add svc / " + srv.URL) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, } l := config.Listen{Addr: addr} diff --git a/proxy/ws_integration_test.go b/proxy/ws_integration_test.go index a03977481..f1a9d8035 100644 --- a/proxy/ws_integration_test.go +++ b/proxy/ws_integration_test.go @@ -33,6 +33,9 @@ func TestProxyWSUpstream(t *testing.T) { defer wssServer.Close() t.Log("Started WSS server: ", wssServer.URL) + //Glob Matching True + globMatching := config.Config{GlobMatching: 
true} + routes := "route add ws /ws " + wsServer.URL + "\n" routes += "route add ws /wss " + wssServer.URL + ` opts "proto=https"` + "\n" routes += "route add ws /insecure " + wssServer.URL + ` opts "proto=https tlsskipverify=true"` + "\n" @@ -44,7 +47,7 @@ func TestProxyWSUpstream(t *testing.T) { InsecureTransport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable(routes) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) defer httpProxy.Close() @@ -56,7 +59,7 @@ func TestProxyWSUpstream(t *testing.T) { InsecureTransport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable(routes) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"]) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) }, }) httpsProxy.TLS = tlsServerConfig() diff --git a/route/issue57_test.go b/route/issue57_test.go index a0f8fb2cc..c52835996 100644 --- a/route/issue57_test.go +++ b/route/issue57_test.go @@ -1,6 +1,7 @@ package route import ( + "github.com/fabiolb/fabio/config" "net/http" "testing" ) @@ -24,6 +25,9 @@ func TestIssue57(t *testing.T) { route del svcb`, } + //Glob Matching True + globMatching := config.Config{GlobMatching: true} + req := &http.Request{URL: mustParse("/foo")} want := "http://foo.com:800" @@ -32,7 +36,7 @@ func TestIssue57(t *testing.T) { if err != nil { t.Fatalf("%d: got %v want nil", i, err) } - target := tbl.Lookup(req, "", rrPicker, prefixMatcher) + target := tbl.Lookup(req, "", rrPicker, prefixMatcher, &globMatching) if target == nil { t.Fatalf("%d: got %v want %v", i, target, want) } diff --git a/route/route_bench_test.go b/route/route_bench_test.go index 2f9126710..e11dd32be 100644 --- a/route/route_bench_test.go +++ b/route/route_bench_test.go @@ -2,6 +2,7 @@ package route import ( "fmt" + "github.com/fabiolb/fabio/config" "net/http" "sync" "testing" @@ -123,8 +124,10 @@ func makeRequests(t Table) []*http.Request { func benchmarkGet(t Table, match matcher, pick picker, pb *testing.PB) { reqs := makeRequests(t) k, n := len(reqs), 0 + //Glob Matching True + globMatching := config.Config{GlobMatching: true} for pb.Next() { - t.Lookup(reqs[n%k], "", pick, match) + t.Lookup(reqs[n%k], "", pick, match, &globMatching) n++ } } diff --git a/route/table.go b/route/table.go index 117543e77..caaf6b18d 100644 --- a/route/table.go +++ b/route/table.go @@ -12,6 +12,7 @@ import ( "sync" "sync/atomic" + "github.com/fabiolb/fabio/config" "github.com/fabiolb/fabio/metrics" "github.com/gobwas/glob" ) @@ -295,13 +296,32 @@ func normalizeHost(host string, tls bool) string { // matchingHosts returns all keys (host name patterns) from the // routing table which match the normalized request hostname. -func (t Table) matchingHosts(req *http.Request) (hosts []string) { +func (t Table) matchingHosts(req *http.Request, cfg *config.Config) (hosts []string) { host := normalizeHost(req.Host, req.TLS != nil) - for pattern := range t { - normpat := normalizeHost(pattern, req.TLS != nil) - g := glob.MustCompile(normpat) - if g.Match(host) { - hosts = append(hosts, pattern) + + // Issue 548 Glob matching causes performance decrease. 
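+	// (Editor's note: the cost is that every lookup compiles and evaluates a
+	// glob per table entry, e.g. glob.MustCompile(normpat).Match(host); the
+	// string-compare branch below does one equality check per pattern and
+	// returns on the first match.)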
+	//
+	// Updated config to allow disabling of glob matching.
+	// A standard string compare is used for route lookups when disabled,
+	// i.e. when glob.matching.enabled is false.
+	if !cfg.GlobMatching {
+		for pattern := range t {
+			normpat := normalizeHost(pattern, req.TLS != nil)
+			if normpat == host {
+				//log.Printf("DEBUG Matched %s and %s", normpat, host)
+				hosts = append(hosts, pattern)
+				return
+			}
+		}
+	} else { // glob.matching.enabled is true (default); glob matching takes the performance hit
+		for pattern := range t {
+			normpat := normalizeHost(pattern, req.TLS != nil)
+			// TODO setup compiled GLOBs in a separate MAP as routes are added/deleted
+			// TODO Issue #548
+			g := glob.MustCompile(normpat)
+			if g.Match(host) {
+				hosts = append(hosts, pattern)
+			}
 		}
 	}

@@ -342,7 +362,7 @@ func Reverse(s string) string {
 // or nil if there is none. It first checks the routes for the host
 // and if none matches then it falls back to generic routes without
 // a host. This is useful for a catch-all '/' rule.
-func (t Table) Lookup(req *http.Request, trace string, pick picker, match matcher) (target *Target) {
+func (t Table) Lookup(req *http.Request, trace string, pick picker, match matcher, cfg *config.Config) (target *Target) {
 	if trace != "" {
 		if len(trace) > 16 {
 			trace = trace[:15]
@@ -352,7 +372,7 @@
 	// find matching hosts for the request
 	// and add "no host" as the fallback option
-	hosts := t.matchingHosts(req)
+	hosts := t.matchingHosts(req, cfg)
 	if trace != "" {
 		log.Printf("[TRACE] %s Matching hosts: %v", trace, hosts)
 	}
diff --git a/route/table_test.go b/route/table_test.go
index 808c9168b..e0ac39407 100644
--- a/route/table_test.go
+++ b/route/table_test.go
@@ -3,6 +3,7 @@ package route
 import (
 	"crypto/tls"
 	"fmt"
+	"github.com/fabiolb/fabio/config"
 	"math"
 	"net/http"
 	"reflect"
@@ -492,6 +493,8 @@ func TestTableLookupIssue448(t *testing.T) {
 	route add mock ccc.com:443/bar https://ccc.com/baz opts "redirect=301"
 	route add mock / http://foo.com/
 	`
+	//Glob Matching True
+	globMatching := config.Config{GlobMatching: true}

 	tbl, err := NewTable(s)
 	if err != nil {
@@ -551,7 +554,7 @@
 	}

 	for i, tt := range tests {
-		if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher).URL.String(), tt.dst; got != want {
+		if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, &globMatching).URL.String(), tt.dst; got != want {
 			t.Errorf("%d: got %v want %v", i, got, want)
 		}
 	}
@@ -573,6 +576,8 @@ func TestTableLookup(t *testing.T) {
 	route add svc *.bbb.abc.com/ http://foo.com:6100
 	route add svc xyz.com:80/ https://xyz.com
 	`
+	//Glob Matching True
+	globMatching := config.Config{GlobMatching: true}

 	tbl, err := NewTable(s)
 	if err != nil {
@@ -626,7 +631,7 @@
 	}

 	for i, tt := range tests {
-		if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher).URL.String(), tt.dst; got != want {
+		if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, &globMatching).URL.String(), tt.dst; got != want {
 			t.Errorf("%d: got %v want %v", i, got, want)
 		}
 	}

From 2ac199224cd02a8fd71060d5fd01d1c195e0441f Mon Sep 17 00:00:00 2001
From: Alex Samorukov
Date: Wed, 19 Sep 2018 17:47:46 +0200
Subject: [PATCH 23/44] Add setting to flush fabio buffer regardless of
 headers (#531)

---
 config/config.go                              |  1 +
 config/default.go                             | 15 ++++++++-------
 config/load.go                                |  1 +
 config/load_test.go                           |  7 +++++++
 docs/content/ref/proxy.globalflushinterval.md | 10 ++++++++++
 fabio.properties                              |  6 ++++++
 proxy/http_proxy.go                           |
2 +- 7 files changed, 34 insertions(+), 8 deletions(-) create mode 100644 docs/content/ref/proxy.globalflushinterval.md diff --git a/config/config.go b/config/config.go index f8ad7e4c2..fc6486e30 100644 --- a/config/config.go +++ b/config/config.go @@ -59,6 +59,7 @@ type Proxy struct { ResponseHeaderTimeout time.Duration KeepAliveTimeout time.Duration FlushInterval time.Duration + GlobalFlushInterval time.Duration LocalIP string ClientIPHeader string TLSHeader string diff --git a/config/default.go b/config/default.go index 45f68424d..a9756d0bf 100644 --- a/config/default.go +++ b/config/default.go @@ -36,13 +36,14 @@ var defaultConfig = &Config{ }, }, Proxy: Proxy{ - MaxConn: 10000, - Strategy: "rnd", - Matcher: "prefix", - NoRouteStatus: 404, - DialTimeout: 30 * time.Second, - FlushInterval: time.Second, - LocalIP: LocalIPString(), + MaxConn: 10000, + Strategy: "rnd", + Matcher: "prefix", + NoRouteStatus: 404, + DialTimeout: 30 * time.Second, + FlushInterval: time.Second, + GlobalFlushInterval: 0, + LocalIP: LocalIPString(), }, Registry: Registry{ Backend: "consul", diff --git a/config/load.go b/config/load.go index d75188471..098d0c2c1 100644 --- a/config/load.go +++ b/config/load.go @@ -139,6 +139,7 @@ func load(cmdline, environ, envprefix []string, props *properties.Properties) (c f.DurationVar(&readTimeout, "proxy.readtimeout", defaultValues.ReadTimeout, "read timeout for incoming requests") f.DurationVar(&writeTimeout, "proxy.writetimeout", defaultValues.WriteTimeout, "write timeout for outgoing responses") f.DurationVar(&cfg.Proxy.FlushInterval, "proxy.flushinterval", defaultConfig.Proxy.FlushInterval, "flush interval for streaming responses") + f.DurationVar(&cfg.Proxy.GlobalFlushInterval, "proxy.globalflushinterval", defaultConfig.Proxy.GlobalFlushInterval, "flush interval for non-streaming responses") f.StringVar(&cfg.Log.AccessFormat, "log.access.format", defaultConfig.Log.AccessFormat, "access log format") f.StringVar(&cfg.Log.AccessTarget, "log.access.target", defaultConfig.Log.AccessTarget, "access log target") f.StringVar(&cfg.Log.RoutesFormat, "log.routes.format", defaultConfig.Log.RoutesFormat, "log format of routing table updates") diff --git a/config/load_test.go b/config/load_test.go index bf7c9f288..fc17f186d 100644 --- a/config/load_test.go +++ b/config/load_test.go @@ -362,6 +362,13 @@ func TestLoad(t *testing.T) { return cfg }, }, + { + args: []string{"-proxy.globalflushinterval", "5ms"}, + cfg: func(cfg *Config) *Config { + cfg.Proxy.GlobalFlushInterval = 5 * time.Millisecond + return cfg + }, + }, { args: []string{"-proxy.maxconn", "555"}, cfg: func(cfg *Config) *Config { diff --git a/docs/content/ref/proxy.globalflushinterval.md b/docs/content/ref/proxy.globalflushinterval.md new file mode 100644 index 000000000..fc6c50001 --- /dev/null +++ b/docs/content/ref/proxy.globalflushinterval.md @@ -0,0 +1,10 @@ +--- +title: "proxy.globalflushinterval" +--- + +`proxy.globalflushinterval` configures periodic flushing of the +response buffer for proxied non-SSE connections. By default it is disabled. + +The default is + + proxy.globalflushinterval = 0 diff --git a/fabio.properties b/fabio.properties index dae2caab0..703027d3d 100644 --- a/fabio.properties +++ b/fabio.properties @@ -358,6 +358,12 @@ # # proxy.flushinterval = 1s +# proxy.globalflushinterval configures periodic flushing of the +# response buffer for non-SSE connections. By default it is not enabled. 
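+#
+# An illustrative example (editor-added): flush buffered response data
+# for all proxied responses every 100ms:
+#
+# proxy.globalflushinterval = 100ms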
+# +# The default is +# +# proxy.globalflushinterval = 0 # proxy.maxconn configures the maximum number of cached # incoming and outgoing connections. diff --git a/proxy/http_proxy.go b/proxy/http_proxy.go index 2991cad56..6730fcc75 100644 --- a/proxy/http_proxy.go +++ b/proxy/http_proxy.go @@ -169,7 +169,7 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { h = newHTTPProxy(targetURL, tr, p.Config.FlushInterval) default: - h = newHTTPProxy(targetURL, tr, time.Duration(0)) + h = newHTTPProxy(targetURL, tr, p.Config.Proxy.GlobalFlushInterval) } if p.Config.GZIPContentTypes != nil { From 054be87eb902561882d6a5b65321c0a644680a75 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Wed, 19 Sep 2018 11:24:23 -0500 Subject: [PATCH 24/44] updated docs for glob.matching.enabled --- docs/content/ref/glob.matching.enabled | 0 docs/content/ref/glob.matching.enabled.md | 11 +++++++++++ 2 files changed, 11 insertions(+) create mode 100644 docs/content/ref/glob.matching.enabled create mode 100644 docs/content/ref/glob.matching.enabled.md diff --git a/docs/content/ref/glob.matching.enabled b/docs/content/ref/glob.matching.enabled new file mode 100644 index 000000000..e69de29bb diff --git a/docs/content/ref/glob.matching.enabled.md b/docs/content/ref/glob.matching.enabled.md new file mode 100644 index 000000000..9f7cdac82 --- /dev/null +++ b/docs/content/ref/glob.matching.enabled.md @@ -0,0 +1,11 @@ +--- +title: "glob.matching.enabled" +--- + +`glob.matching.enabled` Enables/Disables glob matching on route lookups. + +Valid options are `true`, `false` + +The default is + + glob.matching.enabled = true From eda6cf412fd0ffe971b30d1d9f85a86f87422ba1 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Wed, 19 Sep 2018 11:30:22 -0500 Subject: [PATCH 25/44] Updated Docs for glob.matching.enabled --- docs/content/ref/glob.matching.enabled | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs/content/ref/glob.matching.enabled diff --git a/docs/content/ref/glob.matching.enabled b/docs/content/ref/glob.matching.enabled deleted file mode 100644 index e69de29bb..000000000 From 0e122d0f634313997c4d74e551d39af353cecbba Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Wed, 19 Sep 2018 21:09:35 +0200 Subject: [PATCH 26/44] fix compile error for #531 --- proxy/http_proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proxy/http_proxy.go b/proxy/http_proxy.go index 6730fcc75..32307594b 100644 --- a/proxy/http_proxy.go +++ b/proxy/http_proxy.go @@ -169,7 +169,7 @@ func (p *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { h = newHTTPProxy(targetURL, tr, p.Config.FlushInterval) default: - h = newHTTPProxy(targetURL, tr, p.Config.Proxy.GlobalFlushInterval) + h = newHTTPProxy(targetURL, tr, p.Config.GlobalFlushInterval) } if p.Config.GZIPContentTypes != nil { From 1d1d76155dfef4937a033a424cede0f2d7a521d4 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Wed, 19 Sep 2018 21:23:44 +0200 Subject: [PATCH 27/44] Upgrade to go1.11 and consul/vault versions for testing --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index a2c2aeef7..08f2ae885 100644 --- a/Makefile +++ b/Makefile @@ -21,9 +21,9 @@ GOVERSION = $(shell go version | awk '{print $$3;}') GORELEASER = $(shell which goreleaser) # pin versions for CI builds -CI_CONSUL_VERSION=1.1.0 -CI_VAULT_VERSION=0.10.1 -CI_GO_VERSION=1.10.3 +CI_CONSUL_VERSION=1.2.3 +CI_VAULT_VERSION=0.11.1 +CI_GO_VERSION=1.11 # all is the default target all: test From 
ea75fb8d3056af0033a42bb49145b1050ebf7425 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Wed, 19 Sep 2018 16:40:44 -0500 Subject: [PATCH 28/44] updated to pass in bool instead of config.Config to route package --- config/config.go | 22 +++---- config/default.go | 2 +- config/load.go | 2 +- docs/content/ref/glob.matching.disabled.md | 11 ++++ docs/content/ref/glob.matching.enabled.md | 11 ---- fabio.properties | 4 +- main.go | 3 +- proxy/http_integration_test.go | 34 +++++----- proxy/listen_test.go | 6 +- proxy/ws_integration_test.go | 7 +-- route/issue57_test.go | 6 +- route/route_bench_test.go | 5 +- route/table.go | 11 ++-- route/table_test.go | 73 +++++++++++----------- 14 files changed, 92 insertions(+), 105 deletions(-) create mode 100644 docs/content/ref/glob.matching.disabled.md delete mode 100644 docs/content/ref/glob.matching.enabled.md diff --git a/config/config.go b/config/config.go index 8d0fee04c..495614725 100644 --- a/config/config.go +++ b/config/config.go @@ -7,17 +7,17 @@ import ( ) type Config struct { - Proxy Proxy - Registry Registry - Listen []Listen - Log Log - Metrics Metrics - UI UI - Runtime Runtime - ProfileMode string - ProfilePath string - Insecure bool - GlobMatching bool + Proxy Proxy + Registry Registry + Listen []Listen + Log Log + Metrics Metrics + UI UI + Runtime Runtime + ProfileMode string + ProfilePath string + Insecure bool + DisableGlobMatching bool } type CertSource struct { diff --git a/config/default.go b/config/default.go index 55fed35bb..d46ba9bc6 100644 --- a/config/default.go +++ b/config/default.go @@ -77,5 +77,5 @@ var defaultConfig = &Config{ Color: "light-green", Access: "rw", }, - GlobMatching: true, + DisableGlobMatching: false, } diff --git a/config/load.go b/config/load.go index 7992a446a..47f012c58 100644 --- a/config/load.go +++ b/config/load.go @@ -186,7 +186,7 @@ func load(cmdline, environ, envprefix []string, props *properties.Properties) (c f.StringVar(&cfg.UI.Title, "ui.title", defaultConfig.UI.Title, "optional title for the UI") f.StringVar(&cfg.ProfileMode, "profile.mode", defaultConfig.ProfileMode, "enable profiling mode, one of [cpu, mem, mutex, block]") f.StringVar(&cfg.ProfilePath, "profile.path", defaultConfig.ProfilePath, "path to profile dump file") - f.BoolVar(&cfg.GlobMatching, "glob.matching.enabled", defaultConfig.GlobMatching, "Enable/Disable Glob Matching on routes, one of [true, false]") + f.BoolVar(&cfg.DisableGlobMatching, "glob.matching.disabled", defaultConfig.DisableGlobMatching, "Disable Glob Matching on routes, one of [true, false]") // deprecated flags var proxyLogRoutes string diff --git a/docs/content/ref/glob.matching.disabled.md b/docs/content/ref/glob.matching.disabled.md new file mode 100644 index 000000000..5ab2fd06d --- /dev/null +++ b/docs/content/ref/glob.matching.disabled.md @@ -0,0 +1,11 @@ +--- +title: "glob.matching.disabled" +--- + +`glob.matching.disabled` Disables glob matching on route lookups. + +Valid options are `true`, `false` + +The default is + + glob.matching.disabled = false diff --git a/docs/content/ref/glob.matching.enabled.md b/docs/content/ref/glob.matching.enabled.md deleted file mode 100644 index 9f7cdac82..000000000 --- a/docs/content/ref/glob.matching.enabled.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "glob.matching.enabled" ---- - -`glob.matching.enabled` Enables/Disables glob matching on route lookups. 
- -Valid options are `true`, `false` - -The default is - - glob.matching.enabled = true diff --git a/fabio.properties b/fabio.properties index 2dc80dc85..28f7d994d 100644 --- a/fabio.properties +++ b/fabio.properties @@ -743,7 +743,7 @@ # registry.consul.checksRequired = one -# glob.matching.enabled Enables glob matching on route lookups +# glob.matching.disabled Disables glob matching on route lookups # If glob matching is enabled there is a performance decrease # for every route lookup. At a large number of services (> 500) this # can have a significant impact on performance. If glob matching is disabled @@ -751,7 +751,7 @@ # # The default is # -# glob.matching.enabled = true +# glob.matching.disabled = false # metrics.target configures the backend the metrics values are diff --git a/main.go b/main.go index 769a68997..041102a27 100644 --- a/main.go +++ b/main.go @@ -137,6 +137,7 @@ func main() { func newHTTPProxy(cfg *config.Config) http.Handler { var w io.Writer + globDisabled := cfg.DisableGlobMatching switch cfg.Log.AccessTarget { case "": log.Printf("[INFO] Access logging disabled") @@ -183,7 +184,7 @@ func newHTTPProxy(cfg *config.Config) http.Handler { Transport: newTransport(nil), InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}), Lookup: func(r *http.Request) *route.Target { - t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, cfg) + t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, globDisabled) if t == nil { notFound.Inc(1) log.Print("[WARN] No route for ", r.Host, r.URL) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index e67280ba2..92cbcce8b 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -176,7 +176,7 @@ func TestProxyNoRouteStatus(t *testing.T) { func TestProxyStripsPath(t *testing.T) { //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.RequestURI { case "/bar": @@ -190,7 +190,7 @@ func TestProxyStripsPath(t *testing.T) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock /foo/bar " + server.URL + ` opts "strip=/foo"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -217,8 +217,7 @@ func TestProxyHost(t *testing.T) { routes += "route add mock /hostcustom http://b.com/ opts \"host=bar.com\"\n" routes += "route add mock / http://a.com/" tbl, _ := route.NewTable(routes) - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Transport: &http.Transport{ @@ -228,7 +227,7 @@ func TestProxyHost(t *testing.T) { }, }, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -270,14 +269,13 @@ func TestHostRedirect(t *testing.T) { routes := "route add https-redir *:80 https://$host$path opts \"redirect=301\"\n" tbl, _ := route.NewTable(routes) - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Transport: http.DefaultTransport, Lookup: func(r 
*http.Request) *route.Target { r.Host = "c.com" - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -312,13 +310,12 @@ func TestPathRedirect(t *testing.T) { routes += "route add mock /foo http://a.com/abc opts \"redirect=301\"\n" routes += "route add mock /bar http://b.com/$path opts \"redirect=302 strip=/bar\"\n" tbl, _ := route.NewTable(routes) - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -480,15 +477,14 @@ func TestProxyHTTPSUpstream(t *testing.T) { server.TLS = tlsServerConfig() server.StartTLS() defer server.Close() - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -507,8 +503,7 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { server.TLS = &tls.Config{} server.StartTLS() defer server.Close() - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, @@ -518,7 +513,7 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { }, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https tlsskipverify=true"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer proxy.Close() @@ -711,8 +706,7 @@ func BenchmarkProxyLogger(b *testing.B) { b.Fatal("logger.NewHTTPLogger:", err) } - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false proxy := &HTTPProxy{ Config: config.Proxy{ @@ -722,7 +716,7 @@ func BenchmarkProxyLogger(b *testing.B) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock / " + server.URL) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, Logger: l, } diff --git a/proxy/listen_test.go b/proxy/listen_test.go index ae307d27a..c795e51b2 100644 --- a/proxy/listen_test.go +++ b/proxy/listen_test.go @@ -22,8 +22,8 @@ func TestGracefulShutdown(t *testing.T) { })) defer srv.Close() - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false + // start proxy addr := "127.0.0.1:57777" var wg sync.WaitGroup @@ -34,7 +34,7 @@ func TestGracefulShutdown(t *testing.T) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add svc / " + srv.URL) - 
return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, } l := config.Listen{Addr: addr} diff --git a/proxy/ws_integration_test.go b/proxy/ws_integration_test.go index f1a9d8035..853371ca2 100644 --- a/proxy/ws_integration_test.go +++ b/proxy/ws_integration_test.go @@ -33,8 +33,7 @@ func TestProxyWSUpstream(t *testing.T) { defer wssServer.Close() t.Log("Started WSS server: ", wssServer.URL) - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false routes := "route add ws /ws " + wsServer.URL + "\n" routes += "route add ws /wss " + wssServer.URL + ` opts "proto=https"` + "\n" @@ -47,7 +46,7 @@ func TestProxyWSUpstream(t *testing.T) { InsecureTransport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable(routes) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) defer httpProxy.Close() @@ -59,7 +58,7 @@ func TestProxyWSUpstream(t *testing.T) { InsecureTransport: &http.Transport{TLSClientConfig: tlsInsecureConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable(routes) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], &globMatching) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) }, }) httpsProxy.TLS = tlsServerConfig() diff --git a/route/issue57_test.go b/route/issue57_test.go index c52835996..e8d412462 100644 --- a/route/issue57_test.go +++ b/route/issue57_test.go @@ -1,7 +1,6 @@ package route import ( - "github.com/fabiolb/fabio/config" "net/http" "testing" ) @@ -25,8 +24,7 @@ func TestIssue57(t *testing.T) { route del svcb`, } - //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false req := &http.Request{URL: mustParse("/foo")} want := "http://foo.com:800" @@ -36,7 +34,7 @@ func TestIssue57(t *testing.T) { if err != nil { t.Fatalf("%d: got %v want nil", i, err) } - target := tbl.Lookup(req, "", rrPicker, prefixMatcher, &globMatching) + target := tbl.Lookup(req, "", rrPicker, prefixMatcher, globDisabled) if target == nil { t.Fatalf("%d: got %v want %v", i, target, want) } diff --git a/route/route_bench_test.go b/route/route_bench_test.go index e11dd32be..2a5b511be 100644 --- a/route/route_bench_test.go +++ b/route/route_bench_test.go @@ -2,7 +2,6 @@ package route import ( "fmt" - "github.com/fabiolb/fabio/config" "net/http" "sync" "testing" @@ -125,9 +124,9 @@ func benchmarkGet(t Table, match matcher, pick picker, pb *testing.PB) { reqs := makeRequests(t) k, n := len(reqs), 0 //Glob Matching True - globMatching := config.Config{GlobMatching: true} + globDisabled := false for pb.Next() { - t.Lookup(reqs[n%k], "", pick, match, &globMatching) + t.Lookup(reqs[n%k], "", pick, match, globDisabled) n++ } } diff --git a/route/table.go b/route/table.go index caaf6b18d..e4485f6f4 100644 --- a/route/table.go +++ b/route/table.go @@ -12,7 +12,6 @@ import ( "sync" "sync/atomic" - "github.com/fabiolb/fabio/config" "github.com/fabiolb/fabio/metrics" "github.com/gobwas/glob" ) @@ -296,7 +295,7 @@ func normalizeHost(host string, tls bool) string { // matchingHosts returns all keys (host name patterns) from the // routing table which match the normalized request hostname. 
-func (t Table) matchingHosts(req *http.Request, cfg *config.Config) (hosts []string) { +func (t Table) matchingHosts(req *http.Request, globDisabled bool) (hosts []string) { host := normalizeHost(req.Host, req.TLS != nil) // Issue 548 Glob matching causes performance decrease. @@ -304,7 +303,7 @@ func (t Table) matchingHosts(req *http.Request, cfg *config.Config) (hosts []str // Updated config to allow for disabling of Glob Matches // Standard string compare is used if disabled // glob.matching.enabled is false - if !cfg.GlobMatching { + if globDisabled { for pattern := range t { normpat := normalizeHost(pattern, req.TLS != nil) if normpat == host { @@ -316,7 +315,7 @@ func (t Table) matchingHosts(req *http.Request, cfg *config.Config) (hosts []str } else { //glob.matching.enabled is true (default) Performance hit for pattern := range t { normpat := normalizeHost(pattern, req.TLS != nil) - // TODO setup compiled GLOBs in a separate MAP as routes are added/deleted + // TODO setup compiled GLOBs in a separate MAP // TODO Issue #548 g := glob.MustCompile(normpat) if g.Match(host) { @@ -362,7 +361,7 @@ func Reverse(s string) string { // or nil if there is none. It first checks the routes for the host // and if none matches then it falls back to generic routes without // a host. This is useful for a catch-all '/' rule. -func (t Table) Lookup(req *http.Request, trace string, pick picker, match matcher, cfg *config.Config) (target *Target) { +func (t Table) Lookup(req *http.Request, trace string, pick picker, match matcher, globDisabled bool) (target *Target) { if trace != "" { if len(trace) > 16 { trace = trace[:15] @@ -372,7 +371,7 @@ func (t Table) Lookup(req *http.Request, trace string, pick picker, match matche // find matching hosts for the request // and add "no host" as the fallback option - hosts := t.matchingHosts(req, cfg) + hosts := t.matchingHosts(req, globDisabled) if trace != "" { log.Printf("[TRACE] %s Matching hosts: %v", trace, hosts) } diff --git a/route/table_test.go b/route/table_test.go index e0ac39407..d9ef05429 100644 --- a/route/table_test.go +++ b/route/table_test.go @@ -3,7 +3,6 @@ package route import ( "crypto/tls" "fmt" - "github.com/fabiolb/fabio/config" "math" "net/http" "reflect" @@ -493,8 +492,6 @@ func TestTableLookupIssue448(t *testing.T) { route add mock ccc.com:443/bar https://ccc.com/baz opts "redirect=301" route add mock / http://foo.com/ ` - //Glob Matching True - globMatching := config.Config{GlobMatching: true} tbl, err := NewTable(s) if err != nil { @@ -502,8 +499,9 @@ func TestTableLookupIssue448(t *testing.T) { } var tests = []struct { - req *http.Request - dst string + req *http.Request + dst string + globDisabled bool }{ { req: &http.Request{ @@ -554,7 +552,7 @@ func TestTableLookupIssue448(t *testing.T) { } for i, tt := range tests { - if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, &globMatching).URL.String(), tt.dst; got != want { + if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, tt.globDisabled).URL.String(), tt.dst; got != want { t.Errorf("%d: got %v want %v", i, got, want) } } @@ -576,8 +574,6 @@ func TestTableLookup(t *testing.T) { route add svc *.bbb.abc.com/ http://foo.com:6100 route add svc xyz.com:80/ https://xyz.com ` - //Glob Matching True - globMatching := config.Config{GlobMatching: true} tbl, err := NewTable(s) if err != nil { @@ -585,53 +581,54 @@ func TestTableLookup(t *testing.T) { } var tests = []struct { - req *http.Request - dst string + req *http.Request + dst string + globDisabled bool }{ // 
match on host and path with and without trailing slash - {&http.Request{Host: "abc.com", URL: mustParse("/")}, "http://foo.com:1000"}, - {&http.Request{Host: "abc.com", URL: mustParse("/bar")}, "http://foo.com:1000"}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo")}, "http://foo.com:1500"}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/")}, "http://foo.com:2000"}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar")}, "http://foo.com:2500"}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar/")}, "http://foo.com:3000"}, + {&http.Request{Host: "abc.com", URL: mustParse("/")}, "http://foo.com:1000", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/bar")}, "http://foo.com:1000", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo")}, "http://foo.com:1500", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/")}, "http://foo.com:2000", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar")}, "http://foo.com:2500", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar/")}, "http://foo.com:3000", false}, // do not match on host but maybe on path - {&http.Request{Host: "def.com", URL: mustParse("/")}, "http://foo.com:800"}, - {&http.Request{Host: "def.com", URL: mustParse("/bar")}, "http://foo.com:800"}, - {&http.Request{Host: "def.com", URL: mustParse("/foo")}, "http://foo.com:900"}, + {&http.Request{Host: "def.com", URL: mustParse("/")}, "http://foo.com:800", false}, + {&http.Request{Host: "def.com", URL: mustParse("/bar")}, "http://foo.com:800", false}, + {&http.Request{Host: "def.com", URL: mustParse("/foo")}, "http://foo.com:900", false}, // strip default port - {&http.Request{Host: "abc.com:80", URL: mustParse("/")}, "http://foo.com:1000"}, - {&http.Request{Host: "abc.com:443", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:1000"}, + {&http.Request{Host: "abc.com:80", URL: mustParse("/")}, "http://foo.com:1000", false}, + {&http.Request{Host: "abc.com:443", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:1000", false}, // not using default port - {&http.Request{Host: "abc.com:443", URL: mustParse("/")}, "http://foo.com:800"}, - {&http.Request{Host: "abc.com:80", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:800"}, + {&http.Request{Host: "abc.com:443", URL: mustParse("/")}, "http://foo.com:800", false}, + {&http.Request{Host: "abc.com:80", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:800", false}, // glob match the host - {&http.Request{Host: "x.abc.com", URL: mustParse("/")}, "http://foo.com:4000"}, - {&http.Request{Host: "y.abc.com", URL: mustParse("/abc")}, "http://foo.com:4000"}, - {&http.Request{Host: "x.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000"}, - {&http.Request{Host: "y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000"}, - {&http.Request{Host: ".abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000"}, - {&http.Request{Host: "x.y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000"}, - {&http.Request{Host: "y.abc.com:80", URL: mustParse("/foo/")}, "http://foo.com:5000"}, - {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/")}, "http://foo.com:6000"}, - {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/foo")}, "http://foo.com:6000"}, - {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/")}, "http://foo.com:6100"}, - {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/foo")}, "http://foo.com:6100"}, - {&http.Request{Host: "y.abc.com:443", URL: mustParse("/foo/"), 
TLS: &tls.ConnectionState{}}, "http://foo.com:5000"}, + {&http.Request{Host: "x.abc.com", URL: mustParse("/")}, "http://foo.com:4000", false}, + {&http.Request{Host: "y.abc.com", URL: mustParse("/abc")}, "http://foo.com:4000", false}, + {&http.Request{Host: "x.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, + {&http.Request{Host: "y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, + {&http.Request{Host: ".abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, + {&http.Request{Host: "x.y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, + {&http.Request{Host: "y.abc.com:80", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, + {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/")}, "http://foo.com:6000", false}, + {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/foo")}, "http://foo.com:6000", false}, + {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/")}, "http://foo.com:6100", false}, + {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/foo")}, "http://foo.com:6100", false}, + {&http.Request{Host: "y.abc.com:443", URL: mustParse("/foo/"), TLS: &tls.ConnectionState{}}, "http://foo.com:5000", false}, // exact match has precedence over glob match - {&http.Request{Host: "z.abc.com", URL: mustParse("/foo/")}, "http://foo.com:3100"}, + {&http.Request{Host: "z.abc.com", URL: mustParse("/foo/")}, "http://foo.com:3100", false}, // explicit port on route - {&http.Request{Host: "xyz.com", URL: mustParse("/")}, "https://xyz.com"}, + {&http.Request{Host: "xyz.com", URL: mustParse("/")}, "https://xyz.com", false}, } for i, tt := range tests { - if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, &globMatching).URL.String(), tt.dst; got != want { + if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, tt.globDisabled).URL.String(), tt.dst; got != want { t.Errorf("%d: got %v want %v", i, got, want) } } From 1eb2e36e40964e34d71df57fc926d78b6345c573 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Thu, 20 Sep 2018 09:54:59 -0500 Subject: [PATCH 29/44] added separate func for no glob matching --- route/table.go | 63 +++++++++++++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/route/table.go b/route/table.go index e4485f6f4..8c07fb9b0 100644 --- a/route/table.go +++ b/route/table.go @@ -295,32 +295,15 @@ func normalizeHost(host string, tls bool) string { // matchingHosts returns all keys (host name patterns) from the // routing table which match the normalized request hostname. -func (t Table) matchingHosts(req *http.Request, globDisabled bool) (hosts []string) { +func (t Table) matchingHosts(req *http.Request,) (hosts []string) { host := normalizeHost(req.Host, req.TLS != nil) - - // Issue 548 Glob matching causes performance decrease. 
- // - // Updated config to allow for disabling of Glob Matches - // Standard string compare is used if disabled - // glob.matching.enabled is false - if globDisabled { - for pattern := range t { - normpat := normalizeHost(pattern, req.TLS != nil) - if normpat == host { - //log.Printf("DEBUG Matched %s and %s", normpat, host) - hosts = append(hosts, pattern) - return - } - } - } else { //glob.matching.enabled is true (default) Performance hit - for pattern := range t { - normpat := normalizeHost(pattern, req.TLS != nil) - // TODO setup compiled GLOBs in a separate MAP - // TODO Issue #548 - g := glob.MustCompile(normpat) - if g.Match(host) { - hosts = append(hosts, pattern) - } + for pattern := range t { + normpat := normalizeHost(pattern, req.TLS != nil) + // TODO setup compiled GLOBs in a separate MAP + // TODO Issue #548 + g := glob.MustCompile(normpat) + if g.Match(host) { + hosts = append(hosts, pattern) } } @@ -346,6 +329,25 @@ func (t Table) matchingHosts(req *http.Request, globDisabled bool) (hosts []stri return } + +// Issue 548 - Added separate func +// +// matchingHostNoGlob returns the route from the +// routing table which matches the normalized request hostname. +func (t Table) matchingHostNoGlob(req *http.Request) (hosts []string) { + host := normalizeHost(req.Host, req.TLS != nil) + + for pattern := range t { + normpat := normalizeHost(pattern, req.TLS != nil) + if normpat == host { + //log.Printf("DEBUG Matched %s and %s", normpat, host) + hosts = append(hosts, pattern) + return + } + } + return +} + // Reverse returns its argument string reversed rune-wise left to right. // // taken from https://github.com/golang/example/blob/master/stringutil/reverse.go @@ -362,6 +364,8 @@ func Reverse(s string) string { // and if none matches then it falls back to generic routes without // a host. This is useful for a catch-all '/' rule. 
func (t Table) Lookup(req *http.Request, trace string, pick picker, match matcher, globDisabled bool) (target *Target) { + + var hosts []string if trace != "" { if len(trace) > 16 { trace = trace[:15] @@ -371,7 +375,14 @@ func (t Table) Lookup(req *http.Request, trace string, pick picker, match matche // find matching hosts for the request // and add "no host" as the fallback option - hosts := t.matchingHosts(req, globDisabled) + // if globDisabled then match without Glob + // Issue 548 + if globDisabled { + hosts = t.matchingHostNoGlob(req) + } else { + hosts = t.matchingHosts(req) + } + if trace != "" { log.Printf("[TRACE] %s Matching hosts: %v", trace, hosts) } From da6726851704258a98d60bae7edc5608f249ae86 Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Thu, 20 Sep 2018 14:43:25 -0500 Subject: [PATCH 30/44] updated test with CONST, var clean up --- config/default.go | 1 - docs/content/ref/glob.matching.disabled.md | 2 +- fabio.properties | 2 +- main.go | 3 +- proxy/http_integration_test.go | 31 +++++----- route/issue57_test.go | 3 +- route/route_bench_test.go | 3 +- route/table_test.go | 68 ++++++++++++---------- 8 files changed, 57 insertions(+), 56 deletions(-) diff --git a/config/default.go b/config/default.go index d46ba9bc6..45f68424d 100644 --- a/config/default.go +++ b/config/default.go @@ -77,5 +77,4 @@ var defaultConfig = &Config{ Color: "light-green", Access: "rw", }, - DisableGlobMatching: false, } diff --git a/docs/content/ref/glob.matching.disabled.md b/docs/content/ref/glob.matching.disabled.md index 5ab2fd06d..ec1e41e00 100644 --- a/docs/content/ref/glob.matching.disabled.md +++ b/docs/content/ref/glob.matching.disabled.md @@ -2,7 +2,7 @@ title: "glob.matching.disabled" --- -`glob.matching.disabled` Disables glob matching on route lookups. +`glob.matching.disabled` disables glob matching on route lookups. Valid options are `true`, `false` diff --git a/fabio.properties b/fabio.properties index 28f7d994d..207c17da3 100644 --- a/fabio.properties +++ b/fabio.properties @@ -743,7 +743,7 @@ # registry.consul.checksRequired = one -# glob.matching.disabled Disables glob matching on route lookups +# glob.matching.disabled disables glob matching on route lookups # If glob matching is enabled there is a performance decrease # for every route lookup. At a large number of services (> 500) this # can have a significant impact on performance. 
If glob matching is disabled diff --git a/main.go b/main.go index 041102a27..3ccf43b97 100644 --- a/main.go +++ b/main.go @@ -137,7 +137,6 @@ func main() { func newHTTPProxy(cfg *config.Config) http.Handler { var w io.Writer - globDisabled := cfg.DisableGlobMatching switch cfg.Log.AccessTarget { case "": log.Printf("[INFO] Access logging disabled") @@ -184,7 +183,7 @@ func newHTTPProxy(cfg *config.Config) http.Handler { Transport: newTransport(nil), InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}), Lookup: func(r *http.Request) *route.Target { - t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, globDisabled) + t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, cfg.DisableGlobMatching) if t == nil { notFound.Inc(1) log.Print("[WARN] No route for ", r.Host, r.URL) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index 92cbcce8b..4f223ffb2 100644 --- a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -27,6 +27,13 @@ import ( "github.com/pascaldekloe/goe/verify" ) + +const ( + // helper constants for the Lookup function + globEnabled = false + globDisabled = true +) + func TestProxyProducesCorrectXForwardedSomethingHeader(t *testing.T) { var hdr http.Header = make(http.Header) server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -175,8 +182,6 @@ func TestProxyNoRouteStatus(t *testing.T) { } func TestProxyStripsPath(t *testing.T) { - //Glob Matching True - globDisabled := false server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.RequestURI { case "/bar": @@ -190,7 +195,7 @@ func TestProxyStripsPath(t *testing.T) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock /foo/bar " + server.URL + ` opts "strip=/foo"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -217,7 +222,6 @@ func TestProxyHost(t *testing.T) { routes += "route add mock /hostcustom http://b.com/ opts \"host=bar.com\"\n" routes += "route add mock / http://a.com/" tbl, _ := route.NewTable(routes) - globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Transport: &http.Transport{ @@ -227,7 +231,7 @@ func TestProxyHost(t *testing.T) { }, }, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -269,13 +273,12 @@ func TestHostRedirect(t *testing.T) { routes := "route add https-redir *:80 https://$host$path opts \"redirect=301\"\n" tbl, _ := route.NewTable(routes) - globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { r.Host = "c.com" - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -310,12 +313,11 @@ func TestPathRedirect(t *testing.T) { routes += "route add mock /foo http://a.com/abc opts \"redirect=301\"\n" routes += "route add mock /bar http://b.com/$path opts \"redirect=302 strip=/bar\"\n" tbl, _ := route.NewTable(routes) - globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ 
Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -477,14 +479,13 @@ func TestProxyHTTPSUpstream(t *testing.T) { server.TLS = tlsServerConfig() server.StartTLS() defer server.Close() - globDisabled := false proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, Transport: &http.Transport{TLSClientConfig: tlsClientConfig()}, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -503,8 +504,6 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { server.TLS = &tls.Config{} server.StartTLS() defer server.Close() - globDisabled := false - proxy := httptest.NewServer(&HTTPProxy{ Config: config.Proxy{}, Transport: http.DefaultTransport, @@ -513,7 +512,7 @@ func TestProxyHTTPSUpstreamSkipVerify(t *testing.T) { }, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add srv / " + server.URL + ` opts "proto=https tlsskipverify=true"`) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, }) defer proxy.Close() @@ -706,7 +705,7 @@ func BenchmarkProxyLogger(b *testing.B) { b.Fatal("logger.NewHTTPLogger:", err) } - globDisabled := false + proxy := &HTTPProxy{ Config: config.Proxy{ @@ -716,7 +715,7 @@ func BenchmarkProxyLogger(b *testing.B) { Transport: http.DefaultTransport, Lookup: func(r *http.Request) *route.Target { tbl, _ := route.NewTable("route add mock / " + server.URL) - return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globDisabled) + return tbl.Lookup(r, "", route.Picker["rr"], route.Matcher["prefix"], globEnabled) }, Logger: l, } diff --git a/route/issue57_test.go b/route/issue57_test.go index e8d412462..17082b4d5 100644 --- a/route/issue57_test.go +++ b/route/issue57_test.go @@ -24,7 +24,6 @@ func TestIssue57(t *testing.T) { route del svcb`, } - globDisabled := false req := &http.Request{URL: mustParse("/foo")} want := "http://foo.com:800" @@ -34,7 +33,7 @@ func TestIssue57(t *testing.T) { if err != nil { t.Fatalf("%d: got %v want nil", i, err) } - target := tbl.Lookup(req, "", rrPicker, prefixMatcher, globDisabled) + target := tbl.Lookup(req, "", rrPicker, prefixMatcher, globEnabled) if target == nil { t.Fatalf("%d: got %v want %v", i, target, want) } diff --git a/route/route_bench_test.go b/route/route_bench_test.go index 2a5b511be..e5693c572 100644 --- a/route/route_bench_test.go +++ b/route/route_bench_test.go @@ -124,9 +124,8 @@ func benchmarkGet(t Table, match matcher, pick picker, pb *testing.PB) { reqs := makeRequests(t) k, n := len(reqs), 0 //Glob Matching True - globDisabled := false for pb.Next() { - t.Lookup(reqs[n%k], "", pick, match, globDisabled) + t.Lookup(reqs[n%k], "", pick, match, globEnabled) n++ } } diff --git a/route/table_test.go b/route/table_test.go index d9ef05429..7d20ecfc6 100644 --- a/route/table_test.go +++ b/route/table_test.go @@ -11,6 +11,12 @@ import ( "testing" ) +const ( + // helper constants for the Lookup function + globEnabled = false + globDisabled = true +) + func TestTableParse(t 
*testing.T) { genRoutes := func(n int, format string) (a []string) { for i := 0; i < n; i++ { @@ -501,7 +507,7 @@ func TestTableLookupIssue448(t *testing.T) { var tests = []struct { req *http.Request dst string - globDisabled bool + globEnabled bool }{ { req: &http.Request{ @@ -552,7 +558,7 @@ func TestTableLookupIssue448(t *testing.T) { } for i, tt := range tests { - if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, tt.globDisabled).URL.String(), tt.dst; got != want { + if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, globEnabled).URL.String(), tt.dst; got != want { t.Errorf("%d: got %v want %v", i, got, want) } } @@ -583,52 +589,52 @@ func TestTableLookup(t *testing.T) { var tests = []struct { req *http.Request dst string - globDisabled bool + globEnabled bool }{ // match on host and path with and without trailing slash - {&http.Request{Host: "abc.com", URL: mustParse("/")}, "http://foo.com:1000", false}, - {&http.Request{Host: "abc.com", URL: mustParse("/bar")}, "http://foo.com:1000", false}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo")}, "http://foo.com:1500", false}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/")}, "http://foo.com:2000", false}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar")}, "http://foo.com:2500", false}, - {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar/")}, "http://foo.com:3000", false}, + {&http.Request{Host: "abc.com", URL: mustParse("/")}, "http://foo.com:1000", globEnabled}, + {&http.Request{Host: "abc.com", URL: mustParse("/bar")}, "http://foo.com:1000", globEnabled}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo")}, "http://foo.com:1500", globEnabled}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/")}, "http://foo.com:2000", globEnabled}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar")}, "http://foo.com:2500", globEnabled}, + {&http.Request{Host: "abc.com", URL: mustParse("/foo/bar/")}, "http://foo.com:3000", globEnabled}, // do not match on host but maybe on path - {&http.Request{Host: "def.com", URL: mustParse("/")}, "http://foo.com:800", false}, - {&http.Request{Host: "def.com", URL: mustParse("/bar")}, "http://foo.com:800", false}, - {&http.Request{Host: "def.com", URL: mustParse("/foo")}, "http://foo.com:900", false}, + {&http.Request{Host: "def.com", URL: mustParse("/")}, "http://foo.com:800", globEnabled}, + {&http.Request{Host: "def.com", URL: mustParse("/bar")}, "http://foo.com:800", globEnabled}, + {&http.Request{Host: "def.com", URL: mustParse("/foo")}, "http://foo.com:900", globEnabled}, // strip default port - {&http.Request{Host: "abc.com:80", URL: mustParse("/")}, "http://foo.com:1000", false}, - {&http.Request{Host: "abc.com:443", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:1000", false}, + {&http.Request{Host: "abc.com:80", URL: mustParse("/")}, "http://foo.com:1000", globEnabled}, + {&http.Request{Host: "abc.com:443", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:1000", globEnabled}, // not using default port - {&http.Request{Host: "abc.com:443", URL: mustParse("/")}, "http://foo.com:800", false}, - {&http.Request{Host: "abc.com:80", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:800", false}, + {&http.Request{Host: "abc.com:443", URL: mustParse("/")}, "http://foo.com:800", globEnabled}, + {&http.Request{Host: "abc.com:80", URL: mustParse("/"), TLS: &tls.ConnectionState{}}, "http://foo.com:800", globEnabled}, // glob match the host - {&http.Request{Host: "x.abc.com", URL: 
mustParse("/")}, "http://foo.com:4000", false}, - {&http.Request{Host: "y.abc.com", URL: mustParse("/abc")}, "http://foo.com:4000", false}, - {&http.Request{Host: "x.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, - {&http.Request{Host: "y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, - {&http.Request{Host: ".abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, - {&http.Request{Host: "x.y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, - {&http.Request{Host: "y.abc.com:80", URL: mustParse("/foo/")}, "http://foo.com:5000", false}, - {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/")}, "http://foo.com:6000", false}, - {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/foo")}, "http://foo.com:6000", false}, - {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/")}, "http://foo.com:6100", false}, - {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/foo")}, "http://foo.com:6100", false}, - {&http.Request{Host: "y.abc.com:443", URL: mustParse("/foo/"), TLS: &tls.ConnectionState{}}, "http://foo.com:5000", false}, + {&http.Request{Host: "x.abc.com", URL: mustParse("/")}, "http://foo.com:4000", globEnabled}, + {&http.Request{Host: "y.abc.com", URL: mustParse("/abc")}, "http://foo.com:4000", globEnabled}, + {&http.Request{Host: "x.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", globEnabled}, + {&http.Request{Host: "y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", globEnabled}, + {&http.Request{Host: ".abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", globEnabled}, + {&http.Request{Host: "x.y.abc.com", URL: mustParse("/foo/")}, "http://foo.com:5000", globEnabled}, + {&http.Request{Host: "y.abc.com:80", URL: mustParse("/foo/")}, "http://foo.com:5000", globEnabled}, + {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/")}, "http://foo.com:6000", globEnabled}, + {&http.Request{Host: "x.aaa.abc.com", URL: mustParse("/foo")}, "http://foo.com:6000", globEnabled}, + {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/")}, "http://foo.com:6100", globEnabled}, + {&http.Request{Host: "x.bbb.abc.com", URL: mustParse("/foo")}, "http://foo.com:6100", globEnabled}, + {&http.Request{Host: "y.abc.com:443", URL: mustParse("/foo/"), TLS: &tls.ConnectionState{}}, "http://foo.com:5000", globEnabled}, // exact match has precedence over glob match - {&http.Request{Host: "z.abc.com", URL: mustParse("/foo/")}, "http://foo.com:3100", false}, + {&http.Request{Host: "z.abc.com", URL: mustParse("/foo/")}, "http://foo.com:3100", globEnabled}, // explicit port on route - {&http.Request{Host: "xyz.com", URL: mustParse("/")}, "https://xyz.com", false}, + {&http.Request{Host: "xyz.com", URL: mustParse("/")}, "https://xyz.com", globEnabled}, } for i, tt := range tests { - if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, tt.globDisabled).URL.String(), tt.dst; got != want { + if got, want := tbl.Lookup(tt.req, "", rndPicker, prefixMatcher, tt.globEnabled).URL.String(), tt.dst; got != want { t.Errorf("%d: got %v want %v", i, got, want) } } From 89201539183df7ba05f8a1cfc3704297914b38fb Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Thu, 20 Sep 2018 14:48:50 -0500 Subject: [PATCH 31/44] go fmt of files --- proxy/http_integration_test.go | 5 +---- route/issue57_test.go | 1 - route/table.go | 3 +-- route/table_test.go | 10 +++++----- 4 files changed, 7 insertions(+), 12 deletions(-) diff --git a/proxy/http_integration_test.go b/proxy/http_integration_test.go index 4f223ffb2..2132d3dd9 100644 --- 
a/proxy/http_integration_test.go +++ b/proxy/http_integration_test.go @@ -27,10 +27,9 @@ import ( "github.com/pascaldekloe/goe/verify" ) - const ( // helper constants for the Lookup function - globEnabled = false + globEnabled = false globDisabled = true ) @@ -705,8 +704,6 @@ func BenchmarkProxyLogger(b *testing.B) { b.Fatal("logger.NewHTTPLogger:", err) } - - proxy := &HTTPProxy{ Config: config.Proxy{ LocalIP: "1.1.1.1", diff --git a/route/issue57_test.go b/route/issue57_test.go index 17082b4d5..c87dd8b00 100644 --- a/route/issue57_test.go +++ b/route/issue57_test.go @@ -24,7 +24,6 @@ func TestIssue57(t *testing.T) { route del svcb`, } - req := &http.Request{URL: mustParse("/foo")} want := "http://foo.com:800" diff --git a/route/table.go b/route/table.go index 8c07fb9b0..4e12fc182 100644 --- a/route/table.go +++ b/route/table.go @@ -295,7 +295,7 @@ func normalizeHost(host string, tls bool) string { // matchingHosts returns all keys (host name patterns) from the // routing table which match the normalized request hostname. -func (t Table) matchingHosts(req *http.Request,) (hosts []string) { +func (t Table) matchingHosts(req *http.Request) (hosts []string) { host := normalizeHost(req.Host, req.TLS != nil) for pattern := range t { normpat := normalizeHost(pattern, req.TLS != nil) @@ -329,7 +329,6 @@ func (t Table) matchingHosts(req *http.Request,) (hosts []string) { return } - // Issue 548 - Added separate func // // matchingHostNoGlob returns the route from the diff --git a/route/table_test.go b/route/table_test.go index 7d20ecfc6..824693c48 100644 --- a/route/table_test.go +++ b/route/table_test.go @@ -13,7 +13,7 @@ import ( const ( // helper constants for the Lookup function - globEnabled = false + globEnabled = false globDisabled = true ) @@ -505,8 +505,8 @@ func TestTableLookupIssue448(t *testing.T) { } var tests = []struct { - req *http.Request - dst string + req *http.Request + dst string globEnabled bool }{ { @@ -587,8 +587,8 @@ func TestTableLookup(t *testing.T) { } var tests = []struct { - req *http.Request - dst string + req *http.Request + dst string globEnabled bool }{ // match on host and path with and without trailing slash From c57ea716b6efca0ee634c81514cd512242293e90 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 20 Sep 2018 21:59:17 +0200 Subject: [PATCH 32/44] fix variable name --- config/config.go | 22 +++++++++++----------- config/load.go | 2 +- main.go | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/config/config.go b/config/config.go index 7a2efd3bb..eb8575558 100644 --- a/config/config.go +++ b/config/config.go @@ -7,17 +7,17 @@ import ( ) type Config struct { - Proxy Proxy - Registry Registry - Listen []Listen - Log Log - Metrics Metrics - UI UI - Runtime Runtime - ProfileMode string - ProfilePath string - Insecure bool - DisableGlobMatching bool + Proxy Proxy + Registry Registry + Listen []Listen + Log Log + Metrics Metrics + UI UI + Runtime Runtime + ProfileMode string + ProfilePath string + Insecure bool + GlobMatchingDisabled bool } type CertSource struct { diff --git a/config/load.go b/config/load.go index 3db6fb3d4..d641dc0ec 100644 --- a/config/load.go +++ b/config/load.go @@ -187,7 +187,7 @@ func load(cmdline, environ, envprefix []string, props *properties.Properties) (c f.StringVar(&cfg.UI.Title, "ui.title", defaultConfig.UI.Title, "optional title for the UI") f.StringVar(&cfg.ProfileMode, "profile.mode", defaultConfig.ProfileMode, "enable profiling mode, one of [cpu, mem, mutex, block]") f.StringVar(&cfg.ProfilePath, 
"profile.path", defaultConfig.ProfilePath, "path to profile dump file") - f.BoolVar(&cfg.DisableGlobMatching, "glob.matching.disabled", defaultConfig.DisableGlobMatching, "Disable Glob Matching on routes, one of [true, false]") + f.BoolVar(&cfg.GlobMatchingDisabled, "glob.matching.disabled", defaultConfig.GlobMatchingDisabled, "Disable Glob Matching on routes, one of [true, false]") // deprecated flags var proxyLogRoutes string diff --git a/main.go b/main.go index 3ccf43b97..b557b1219 100644 --- a/main.go +++ b/main.go @@ -183,7 +183,7 @@ func newHTTPProxy(cfg *config.Config) http.Handler { Transport: newTransport(nil), InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}), Lookup: func(r *http.Request) *route.Target { - t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, cfg.DisableGlobMatching) + t := route.GetTable().Lookup(r, r.Header.Get("trace"), pick, match, cfg.GlobMatchingDisabled) if t == nil { notFound.Inc(1) log.Print("[WARN] No route for ", r.Host, r.URL) From 5619989d4d56a0b8510ac512c232390a5ad50b06 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 19:48:05 +0200 Subject: [PATCH 33/44] Update CHANGELOG --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f1f0ec504..cd4304024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,17 @@ #### Improvements + * [PR #497](https://github.com/fabiolb/fabio/pull/497): Make tests pass with latest Consul and Vault versions + + Thanks to [@pschultz](https://github.com/pschultz) for the patch. + + * [PR #531](https://github.com/fabiolb/fabio/pull/531): Set flush buffer interval for non-SSE requests + + This PR adds a `proxy.globalflushinterval` option to configure an interval when the HTTP Response + Buffer is flushed. + + Thanks to [@samm-git](https://github.com/samm-git) for the patch. + * [Issue #542](https://github.com/fabiolb/fabio/issues/542): Ignore host case when adding and matching routes Fabio was forcing hostnames in routes added via Consul tags to lowercase. This caused problems @@ -31,6 +42,15 @@ Thanks to [@shantanugadgil](https://github.com/shantanugadgil) for the patch. + * [Issue #548](https://github.com/fabiolb/fabio/issues/548): Slow glob matching with large number of services + + This patch adds the new `glob.matching.disabled` option which controls whether glob matching is enabled for + route lookups. If the number of routes is large then the glob matching can have a performance impact and + disabling it may help. + + Thanks to [@galen0624](https://github.com/galen0624) for the patch and + [@leprechau](https://github.com/leprechau) for the review. + #### Features * [Issue #544](https://github.com/fabiolb/fabio/issues/544): Add $host pseudo variable @@ -322,6 +342,7 @@ urlprefix-/foo redirect=301,https://www.foo.com$path * [PR #315](https://github.com/fabiolb/fabio/pull/315)/[Issue #135](https://github.com/fabiolb/fabio/issues/135): Vault PKI cert source This adds support for using [Vault](https://vaultproject.io/) as a PKI cert source. + Thanks to [@pschultz](https://github.com/pschultz) for providing this patch! 
#### Bug Fixes From 3690f6d1abe288178478b4ab27da26f11bf41e34 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:21:11 +0200 Subject: [PATCH 34/44] upgrade ci build to latest versions --- Dockerfile-test | 15 --------------- Makefile | 6 +++--- 2 files changed, 3 insertions(+), 18 deletions(-) delete mode 100644 Dockerfile-test diff --git a/Dockerfile-test b/Dockerfile-test deleted file mode 100644 index b9b04ca46..000000000 --- a/Dockerfile-test +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu -RUN apt-get update && apt-get -y install make git-core unzip -ARG consul_version=1.0.6 -ARG vault_version=0.9.6 -ARG go_version=1.10.3 -ADD https://releases.hashicorp.com/consul/${consul_version}/consul_${consul_version}_linux_amd64.zip /usr/local/bin -ADD https://releases.hashicorp.com/vault/${vault_version}/vault_${vault_version}_linux_amd64.zip /usr/local/bin -ADD https://dl.google.com/go/go${go_version}.linux-amd64.tar.gz /usr/local -RUN cd /usr/local/bin && unzip consul_${consul_version}_linux_amd64.zip -RUN cd /usr/local/bin && unzip vault_${vault_version}_linux_amd64.zip -RUN cd /usr/local && tar xf go${go_version}.linux-amd64.tar.gz -ENV PATH=/usr/local/go/bin:/root/go/bin:$PATH -WORKDIR /root/go/src/github.com/fabiolb/fabio -COPY . . -CMD "/bin/bash" diff --git a/Makefile b/Makefile index 08f2ae885..86266fcc6 100644 --- a/Makefile +++ b/Makefile @@ -21,9 +21,9 @@ GOVERSION = $(shell go version | awk '{print $$3;}') GORELEASER = $(shell which goreleaser) # pin versions for CI builds -CI_CONSUL_VERSION=1.2.3 -CI_VAULT_VERSION=0.11.1 -CI_GO_VERSION=1.11 +CI_CONSUL_VERSION=1.3.0 +CI_VAULT_VERSION=0.11.4 +CI_GO_VERSION=1.11.1 # all is the default target all: test From 2ae2c82f7f29dc435cd4f51851485c30527079ae Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:21:25 +0200 Subject: [PATCH 35/44] make Dockerfile more useful --- Dockerfile | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1bc6c619f..d9e1395b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,22 @@ -FROM scratch -ADD ca-certificates.crt /etc/ssl/certs/ +FROM golang:1.11.1-alpine AS build + +ARG consul_version=1.3.0 +ADD https://releases.hashicorp.com/consul/${consul_version}/consul_${consul_version}_linux_amd64.zip /usr/local/bin +RUN cd /usr/local/bin && unzip consul_${consul_version}_linux_amd64.zip + +ARG vault_version=0.11.4 +ADD https://releases.hashicorp.com/vault/${vault_version}/vault_${vault_version}_linux_amd64.zip /usr/local/bin +RUN cd /usr/local/bin && unzip vault_${vault_version}_linux_amd64.zip + +WORKDIR /go/src/github.com/fabiolb/fabio +COPY . . +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go test -ldflags "-s -w" ./... 
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" + +FROM alpine:3.8 +RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* +COPY --from=build /go/src/github.com/fabiolb/fabio/fabio /usr/bin ADD fabio.properties /etc/fabio/fabio.properties -ADD fabio / EXPOSE 9998 9999 -CMD ["/fabio", "-cfg", "/etc/fabio/fabio.properties"] +ENTRYPOINT ["/usr/bin/fabio"] +CMD ["-cfg", "/etc/fabio/fabio.properties"] From 4b961dbbca312b1373ea26c50c1a959502bd924c Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:23:53 +0200 Subject: [PATCH 36/44] Update CHANGELOG --- CHANGELOG.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd4304024..b378ca901 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,13 +21,17 @@ #### Improvements + * The default Docker image is now based on alpine:3.8 and runs the full test suite during build. It also sets + `/usr/bin/fabio` as `ENTRYPOINT` with `-cfg /etc/fabio/fabio.properties` as default command line arguments. + The previous image was built on `scratch`. + * [PR #497](https://github.com/fabiolb/fabio/pull/497): Make tests pass with latest Consul and Vault versions - + Thanks to [@pschultz](https://github.com/pschultz) for the patch. * [PR #531](https://github.com/fabiolb/fabio/pull/531): Set flush buffer interval for non-SSE requests - This PR adds a `proxy.globalflushinterval` option to configure an interval when the HTTP Response + This PR adds a `proxy.globalflushinterval` option to configure an interval when the HTTP Response Buffer is flushed. Thanks to [@samm-git](https://github.com/samm-git) for the patch. @@ -43,12 +47,12 @@ Thanks to [@shantanugadgil](https://github.com/shantanugadgil) for the patch. * [Issue #548](https://github.com/fabiolb/fabio/issues/548): Slow glob matching with large number of services - + This patch adds the new `glob.matching.disabled` option which controls whether glob matching is enabled for route lookups. If the number of routes is large then the glob matching can have a performance impact and disabling it may help. - Thanks to [@galen0624](https://github.com/galen0624) for the patch and + Thanks to [@galen0624](https://github.com/galen0624) for the patch and [@leprechau](https://github.com/leprechau) for the review. #### Features From b781c49517057ef99c1c096681b14e7020c6583e Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:25:12 +0200 Subject: [PATCH 37/44] Prepare for release 1.5.10 --- CHANGELOG.md | 2 +- main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b378ca901..58d06b9c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ ## Changelog -### Unreleased +### [v1.5.10](https://github.com/fabiolb/fabio/releases/tag/v1.5.10) - 25 Oct 2018 #### Breaking Changes diff --git a/main.go b/main.go index b557b1219..3f2ccda85 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ import ( // It is also set by the linker when fabio // is built via the Makefile or the build/docker.sh // script to ensure the correct version nubmer -var version = "1.5.9" +var version = "1.5.10" var shuttingDown int32 From af8ca29cb530a0184abfaf39192f72803a2d2b81 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:29:49 +0200 Subject: [PATCH 38/44] revert version update in main.go This is done by the release script. 
--- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 3f2ccda85..b557b1219 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ import ( // It is also set by the linker when fabio // is built via the Makefile or the build/docker.sh // script to ensure the correct version nubmer -var version = "1.5.10" +var version = "1.5.9" var shuttingDown int32 From 4febb84b7dd7579c29d2a309f42b8b3d1dcbdc8d Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:37:29 +0200 Subject: [PATCH 39/44] build: fix docker-test target --- Makefile | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 86266fcc6..0ccfd2628 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,6 @@ GORELEASER = $(shell which goreleaser) # pin versions for CI builds CI_CONSUL_VERSION=1.3.0 CI_VAULT_VERSION=0.11.4 -CI_GO_VERSION=1.11.1 # all is the default target all: test @@ -107,20 +106,12 @@ docker-aliases: # cache the binaries and prevent repeated downloads since # ADD downloads the file every time. docker-test: - test -r consul_$(CI_CONSUL_VERSION)_linux_amd64.zip || \ - wget https://releases.hashicorp.com/consul/$(CI_CONSUL_VERSION)/consul_$(CI_CONSUL_VERSION)_linux_amd64.zip - test -r vault_$(CI_VAULT_VERSION)_linux_amd64.zip || \ - wget https://releases.hashicorp.com/vault/$(CI_VAULT_VERSION)/vault_$(CI_VAULT_VERSION)_linux_amd64.zip - test -r go$(CI_GO_VERSION).linux-amd64.tar.gz || \ - wget https://dl.google.com/go/go$(CI_GO_VERSION).linux-amd64.tar.gz docker build \ --build-arg consul_version=$(CI_CONSUL_VERSION) \ --build-arg vault_version=$(CI_VAULT_VERSION) \ - --build-arg go_version=$(CI_GO_VERSION) \ -t test-fabio \ - -f Dockerfile-test \ + -f Dockerfile \ . - docker run -it test-fabio make test # codeship runs the CI on codeship codeship: From d1e47f6a6bf3c09e1607ea221ecb6a6c6ead5e88 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:47:04 +0200 Subject: [PATCH 40/44] build: update goreleaser config --- .goreleaser.yml | 11 +++++++---- Dockerfile-goreleaser | 7 +++++++ 2 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 Dockerfile-goreleaser diff --git a/.goreleaser.yml b/.goreleaser.yml index 5627fab08..7df22bc83 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -26,9 +26,12 @@ sign: artifacts: checksum dockers: - - image: fabiolb/fabio - latest: true - tag_template: '{{ .Version }}-{{ .Env.GOVERSION }}' + - + dockerfile: Dockerfile-goreleaser + image_templates: + - 'fabiolb/fabio:latest' + - 'fabiolb/fabio:{{ .Version }}-{{ .Env.GOVERSION }}' + - 'magiconair/fabio:latest' + - 'magiconair/fabio:{{ .Version }}-{{ .Env.GOVERSION }}' extra_files: - - build/ca-certificates.crt - fabio.properties diff --git a/Dockerfile-goreleaser b/Dockerfile-goreleaser new file mode 100644 index 000000000..d94cbfbfa --- /dev/null +++ b/Dockerfile-goreleaser @@ -0,0 +1,7 @@ +FROM alpine:3.8 +RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* +COPY fabio /usr/bin +ADD fabio.properties /etc/fabio/fabio.properties +EXPOSE 9998 9999 +ENTRYPOINT ["/usr/bin/fabio"] +CMD ["-cfg", "/etc/fabio/fabio.properties"] From 20146063ef4425333ce4ab757fbcff8a5a90f06a Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:58:55 +0200 Subject: [PATCH 41/44] build: goreleaser can now create multiple Docker tags --- Makefile | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 0ccfd2628..ef34c1106 100644 --- a/Makefile +++ 
b/Makefile @@ -70,7 +70,7 @@ pkg: build test # later targets can pick up the new tag value. release: $(MAKE) tag - $(MAKE) preflight docker-test gorelease homebrew docker-aliases + $(MAKE) preflight docker-test gorelease homebrew # preflight runs some checks before a release preflight: @@ -91,14 +91,6 @@ gorelease: homebrew: build/homebrew.sh $(LAST_TAG) -# docker-aliases creates aliases for the docker containers -# since goreleaser doesn't handle that properly yet -docker-aliases: - docker tag fabiolb/fabio:$(VERSION)-$(GOVERSION) magiconair/fabio:$(VERSION)-$(GOVERSION) - docker tag fabiolb/fabio:$(VERSION)-$(GOVERSION) magiconair/fabio:latest - docker push magiconair/fabio:$(VERSION)-$(GOVERSION) - docker push magiconair/fabio:latest - # docker-test runs make test in a Docker container with # pinned versions of the external dependencies # From 83b0f600b1a83568788d2af446b261b591b87b32 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Thu, 25 Oct 2018 20:59:27 +0200 Subject: [PATCH 42/44] Release v1.5.10 --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index b557b1219..3f2ccda85 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ import ( // It is also set by the linker when fabio // is built via the Makefile or the build/docker.sh // script to ensure the correct version nubmer -var version = "1.5.9" +var version = "1.5.10" var shuttingDown int32 From 4bb0c0276235c0dd729e1bb0867b28db631f4377 Mon Sep 17 00:00:00 2001 From: Frank Schroeder Date: Fri, 26 Oct 2018 09:45:22 +0200 Subject: [PATCH 43/44] update github.com/circonus-labs/circonus-gometrics --- go.mod | 2 +- go.sum | 2 + .../circonus-gometrics/OPTIONS.md | 109 ++++ .../circonus-gometrics/README.md | 305 +++++----- .../circonus-gometrics/api/README.md | 163 ++++++ .../circonus-gometrics/api/account.go | 181 ++++++ .../circonus-gometrics/api/acknowledgement.go | 190 +++++++ .../circonus-gometrics/api/alert.go | 131 +++++ .../circonus-gometrics/api/annotation.go | 223 ++++++++ .../circonus-gometrics/api/api.go | 274 +++++++-- .../circonus-gometrics/api/broker.go | 134 +++-- .../circonus-gometrics/api/check.go | 132 +++-- .../circonus-gometrics/api/check_bundle.go | 255 +++++++++ .../api/check_bundle_metrics.go | 95 ++++ .../circonus-gometrics/api/checkbundle.go | 132 ----- .../circonus-gometrics/api/config/consts.go | 538 ++++++++++++++++++ .../circonus-gometrics/api/contact_group.go | 263 +++++++++ .../circonus-gometrics/api/dashboard.go | 399 +++++++++++++ .../circonus-gometrics/api/doc.go | 63 ++ .../circonus-gometrics/api/graph.go | 349 ++++++++++++ .../circonus-gometrics/api/maintenance.go | 220 +++++++ .../circonus-gometrics/api/metric.go | 162 ++++++ .../circonus-gometrics/api/metric_cluster.go | 261 +++++++++ .../circonus-gometrics/api/outlier_report.go | 221 +++++++ .../api/provision_broker.go | 151 +++++ .../circonus-gometrics/api/rule_set.go | 234 ++++++++ .../circonus-gometrics/api/rule_set_group.go | 231 ++++++++ .../circonus-gometrics/api/user.go | 159 ++++++ .../circonus-gometrics/api/worksheet.go | 232 ++++++++ .../circonus-gometrics/checkmgr/broker.go | 99 ++-- .../circonus-gometrics/checkmgr/cert.go | 25 +- .../circonus-gometrics/checkmgr/check.go | 267 ++++++++- .../circonus-gometrics/checkmgr/checkmgr.go | 208 ++++--- .../circonus-gometrics/checkmgr/metrics.go | 135 ++++- .../circonus-gometrics/circonus-gometrics.go | 160 ++++-- .../circonus-gometrics/counter.go | 15 + .../circonus-labs/circonus-gometrics/gauge.go | 12 + .../circonus-gometrics/histogram.go | 27 +- 
.../circonus-gometrics/metrics.go | 15 + .../circonus-gometrics/submit.go | 60 +- .../circonus-labs/circonus-gometrics/util.go | 86 ++- vendor/modules.txt | 3 +- 42 files changed, 6259 insertions(+), 664 deletions(-) create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/README.md create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/account.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go delete mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/user.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/metrics.go diff --git a/go.mod b/go.mod index ed518370a..1d46fb2b3 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/fabiolb/fabio require ( github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 - github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 + github.com/circonus-labs/circonus-gometrics v1.0.0 github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 // indirect github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 github.com/fatih/structs v0.0.0-20160601093117-5ada2f449b10 // indirect diff --git a/go.sum b/go.sum index 74da97231..eb2331859 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 h1:AuTQOaYRBRt github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 h1:DHdPk46/oiLoYPQ7Dt10pRx74HaJVoyjS5Ea7eyROes= github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonus-gometrics v1.0.0 
h1:pMILaNgfEYrx8qdBukpsVU3muDkqfyPK/kmz1Ww4tQ4=
+github.com/circonus-labs/circonus-gometrics v1.0.0/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 h1:g5Ns3zGpLJ0T+1YJ0d3VVPrlMMyFHXV4x82xNgIT9z0=
 github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 h1:TYSgSeiA3LfBi6son0KgHb4wcTXV1kB5wZtTorXQ914=
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md
new file mode 100644
index 000000000..1cd78d19e
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md
@@ -0,0 +1,109 @@
+## Circonus gometrics options
+
+### Example defaults
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+
+	cgm "github.com/circonus-labs/circonus-gometrics"
+)
+
+func main() {
+	cfg := &cgm.Config{}
+
+	// Defaults
+
+	// General
+	cfg.Debug = false
+	cfg.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+	cfg.Interval = "10s"
+	cfg.ResetCounters = "true"
+	cfg.ResetGauges = "true"
+	cfg.ResetHistograms = "true"
+	cfg.ResetText = "true"
+
+	// API
+	cfg.CheckManager.API.TokenKey = ""
+	cfg.CheckManager.API.TokenApp = "circonus-gometrics"
+	cfg.CheckManager.API.URL = "https://api.circonus.com/v2"
+	cfg.CheckManager.API.CACert = nil
+
+	// Check
+	_, an := path.Split(os.Args[0])
+	hn, _ := os.Hostname()
+	cfg.CheckManager.Check.ID = ""
+	cfg.CheckManager.Check.SubmissionURL = ""
+	cfg.CheckManager.Check.InstanceID = fmt.Sprintf("%s:%s", hn, an)
+	cfg.CheckManager.Check.TargetHost = cfg.CheckManager.Check.InstanceID
+	cfg.CheckManager.Check.DisplayName = cfg.CheckManager.Check.InstanceID
+	cfg.CheckManager.Check.SearchTag = fmt.Sprintf("service:%s", an)
+	cfg.CheckManager.Check.Tags = ""
+	cfg.CheckManager.Check.Secret = "" // randomly generated sha256 hash
+	cfg.CheckManager.Check.MaxURLAge = "5m"
+	cfg.CheckManager.Check.ForceMetricActivation = "false"
+
+	// Broker
+	cfg.CheckManager.Broker.ID = ""
+	cfg.CheckManager.Broker.SelectTag = ""
+	cfg.CheckManager.Broker.MaxResponseTime = "500ms"
+
+	// create a new cgm instance and start sending metrics...
+	// see the complete example in the main README.
+}
+```
+
+## Options
+| Option | Default | Description |
+| ------ | ------- | ----------- |
+| General ||
+| `cfg.Log` | none | log.Logger instance to send logging messages. Default is to discard messages. If Debug is turned on and no instance is specified, messages will go to stderr. |
+| `cfg.Debug` | false | Turn on debugging messages. |
+| `cfg.Interval` | "10s" | Interval at which metrics are flushed and sent to Circonus. Set to "0s" to disable automatic flush (note, if disabled, `cgm.Flush()` must be called manually to send metrics to Circonus).|
+| `cfg.ResetCounters` | "true" | Reset counter metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
+| `cfg.ResetGauges` | "true" | Reset gauge metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
+| `cfg.ResetHistograms` | "true" | Reset histogram metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
+| `cfg.ResetText` | "true" | Reset text metrics after each submission.
Change to "false" to retain (and continue submitting) the last value.|
+|API||
+| `cfg.CheckManager.API.TokenKey` | "" | [Circonus API Token key](https://login.circonus.com/user/tokens) |
+| `cfg.CheckManager.API.TokenApp` | "circonus-gometrics" | App associated with API token |
+| `cfg.CheckManager.API.URL` | "https://api.circonus.com/v2" | Circonus API URL |
+| `cfg.CheckManager.API.CACert` | nil | [*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates |
+|Check||
+| `cfg.CheckManager.Check.ID` | "" | Check ID of previously created check. (*Note: **check id** not **check bundle id**.*) |
+| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. |
+| `cfg.CheckManager.Check.InstanceID` | hostname:program name | An identifier for the 'group of metrics emitted by this process or service'. |
+| `cfg.CheckManager.Check.TargetHost` | InstanceID | Explicit setting of `check.target`. |
+| `cfg.CheckManager.Check.DisplayName` | InstanceID | Custom `check.display_name`. Shows in UI check list. |
+| `cfg.CheckManager.Check.SearchTag` | service:program name | Specific tag used to search for an existing check when neither SubmissionURL nor ID are provided. |
+| `cfg.CheckManager.Check.Tags` | "" | List (comma separated) of tags to add to check when it is being created. The SearchTag will be added to the list. |
+| `cfg.CheckManager.Check.Secret` | randomly generated | A secret to use when creating an httptrap check. |
+| `cfg.CheckManager.Check.MaxURLAge` | "5m" | Maximum amount of time to retry a [failing] submission URL before refreshing it. |
+| `cfg.CheckManager.Check.ForceMetricActivation` | "false" | If a metric has been disabled via the UI the default behavior is to *not* re-activate the metric; this setting overrides the behavior and will re-activate the metric when it is encountered. |
+|Broker||
+| `cfg.CheckManager.Broker.ID` | "" | ID of a specific broker to use when creating a check. Default is to use a random enterprise broker or the public Circonus default broker. |
+| `cfg.CheckManager.Broker.SelectTag` | "" | Used to select a broker with the same tag(s). If more than one broker has the tag(s), one will be selected randomly from the resulting list. (e.g. could be used to select one from a list of brokers serving a specific colo/region. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") |
+| `cfg.CheckManager.Broker.MaxResponseTime` | "500ms" | Maximum amount of time to wait for a broker connection test to be considered valid. (If latency exceeds this value, the broker will be considered invalid and not available for selection.) |
+
+## Notes:
+
+* All options are *strings* with the following exceptions:
+  * `cfg.Log` - an instance of [`log.Logger`](https://golang.org/pkg/log/#Logger) or something else (e.g. [logrus](https://github.com/Sirupsen/logrus)) which can be used to satisfy the interface requirements.
+  * `cfg.Debug` - a boolean true|false.
+* At a minimum, one of either `API.TokenKey` or `Check.SubmissionURL` is **required** for cgm to function.
+* Check management can be disabled by providing a `Check.SubmissionURL` without an `API.TokenKey`. Note: the supplied URL needs to be http or the broker needs to be running with a cert which can be verified. Otherwise, the `API.TokenKey` will be required to retrieve the correct CA certificate to validate the broker's cert for the SSL connection.
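+
+  A minimal sketch of that submission-URL-only mode (the URL below is a placeholder for a previously created httptrap check's submission URL, not a real endpoint):
+
+  ```go
+  cmc := &cgm.Config{}
+  // No API.TokenKey is set, so cgm performs no check management.
+  cmc.CheckManager.Check.SubmissionURL = "http://broker.example.com:2609/module/httptrap/..."
+  metrics, err := cgm.NewCirconusMetrics(cmc)
+  ```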
+* A note on `Check.InstanceID`: the instance id is used to consistently identify a check. The display name can be changed in the UI. The hostname may be ephemeral. For metric continuity, the instance id is used to locate existing checks. Since the check.target is never actually used by an httptrap check, it is more decorative than functional, and a valid FQDN is not required for an httptrap check.target. But using the instance id as the target can pollute the Host list in the UI with host:application specific entries.
+* Check identification precedence
+  1. Check SubmissionURL
+  2. Check ID
+  3. Search
+    1. Search for an active httptrap check for TargetHost which has the SearchTag
+    2. Search for an active httptrap check which has the SearchTag and the InstanceID in the notes field
+    3. Create a new check
+* Broker selection
+  1. If Broker.ID or Broker.SelectTag are not specified, a broker will be selected randomly from the list of brokers available to the API token. Enterprise brokers take precedence. A viable broker is "active", has the "httptrap" module enabled, and responds within Broker.MaxResponseTime.
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md
index d245e40af..ca6cabb23 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/README.md
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/README.md
@@ -1,156 +1,208 @@
 # Circonus metrics tracking for Go applications
 
-This library supports named counters, gauges and histograms.
-It also provides convenience wrappers for registering latency
-instrumented functions with Go's builtin http server.
+This library supports named counters, gauges and histograms. It also provides convenience wrappers for registering latency instrumented functions with Go's builtin http server.
 
-Initializing only requires setting an ApiToken.
+Initializing requires setting an [API Token](https://login.circonus.com/user/tokens) at a minimum.
+
+## Options
+
+See [OPTIONS.md](OPTIONS.md) for information on all of the available cgm options.
 
 ## Example
 
-**rough and simple**
+### Bare bones minimum
+
+A working cut-n-paste example. Simply set the required environment variable `CIRCONUS_API_TOKEN` and run.
 
 ```go
 package main
 
 import (
-	"log"
-	"math/rand"
-	"os"
-	"time"
+    "log"
+    "math/rand"
+    "os"
+    "os/signal"
+    "syscall"
+    "time"
 
-	cgm "github.com/circonus-labs/circonus-gometrics"
+    cgm "github.com/circonus-labs/circonus-gometrics"
 )
 
 func main() {
-	log.Println("Configuring cgm")
-
-	cmc := &cgm.Config{}
-
-	// Interval at which metrics are submitted to Circonus, default: 10 seconds
-	cmc.Interval = "10s" // 10 seconds
-	// Enable debug messages, default: false
-	cmc.Debug = false
-	// Send debug messages to specific log.Logger instance
-	// default: if debug stderr, else, discard
-	//cmc.CheckManager.Log = ...
- - // Circonus API configuration options - // - // Token, no default (blank disables check manager) - cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") - // App name, default: circonus-gometrics - cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") - // URL, default: https://api.circonus.com/v2 - cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") - - // Check configuration options - // - // precedence 1 - explicit submission_url - // precedence 2 - specific check id (note: not a check bundle id) - // precedence 3 - search using instanceId and searchTag - // otherwise: if an applicable check is NOT specified or found, an - // attempt will be made to automatically create one - // - // Pre-existing httptrap check submission_url - cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL") - // Pre-existing httptrap check id (check not check bundle) - cmc.CheckManager.Check.ID = "" - // if neither a submission url nor check id are provided, an attempt will be made to find an existing - // httptrap check by using the circonus api to search for a check matching the following criteria: - // an active check, - // of type httptrap, - // where the target/host is equal to InstanceId - see below - // and the check has a tag equal to SearchTag - see below - // Instance ID - an identifier for the 'group of metrics emitted by this process or service' - // this is used as the value for check.target (aka host) - // default: 'hostname':'program name' - // note: for a persistent instance that is ephemeral or transient where metric continuity is - // desired set this explicitly so that the current hostname will not be used. - cmc.CheckManager.Check.InstanceID = "" - // Search tag - a specific tag which, when coupled with the instanceId serves to identify the - // origin and/or grouping of the metrics - // default: service:application name (e.g. service:consul) - cmc.CheckManager.Check.SearchTag = "" - // Check secret, default: generated when a check needs to be created - cmc.CheckManager.Check.Secret = "" - // Check tags, array of strings, additional tags to add to a new check, default: none - //cmc.CheckManager.Check.Tags = []string{"category:tagname"} - // max amount of time to to hold on to a submission url - // when a given submission fails (due to retries) if the - // time the url was last updated is > than this, the trap - // url will be refreshed (e.g. if the broker is changed - // in the UI) default 5 minutes - cmc.CheckManager.Check.MaxURLAge = "5m" - // custom display name for check, default: "InstanceId /cgm" - cmc.CheckManager.Check.DisplayName = "" - // force metric activation - if a metric has been disabled via the UI - // the default behavior is to *not* re-activate the metric; this setting - // overrides the behavior and will re-activate the metric when it is - // encountered. "(true|false)", default "false" - cmc.CheckManager.Check.ForceMetricActivation = "false" - - // Broker configuration options - // - // Broker ID of specific broker to use, default: random enterprise broker or - // Circonus default if no enterprise brokers are available. - // default: only used if set - cmc.CheckManager.Broker.ID = "" - // used to select a broker with the same tag (e.g. can be used to dictate that a broker - // serving a specific location should be used. 
"dc:sfo", "location:new_york", "zone:us-west") - // if more than one broker has the tag, one will be selected randomly from the resulting list - // default: not used unless != "" - cmc.CheckManager.Broker.SelectTag = "" - // longest time to wait for a broker connection (if latency is > the broker will - // be considered invalid and not available for selection.), default: 500 milliseconds - cmc.CheckManager.Broker.MaxResponseTime = "500ms" - // if broker Id or SelectTag are not specified, a broker will be selected randomly - // from the list of brokers available to the api token. enterprise brokers take precedence - // viable brokers are "active", have the "httptrap" module enabled, are reachable and respond - // within MaxResponseTime. - - log.Println("Creating new cgm instance") - - metrics, err := cgm.NewCirconusMetrics(cmc) - if err != nil { - panic(err) - } - - src := rand.NewSource(time.Now().UnixNano()) - rnd := rand.New(src) - - log.Println("Starting cgm internal auto-flush timer") - metrics.Start() - - log.Println("Starting to send metrics") - - // number of "sets" of metrics to send (a minute worth) - max := 60 - - for i := 1; i < max; i++ { - log.Printf("\tmetric set %d of %d", i, 60) - metrics.Timing("ding", rnd.Float64()*10) - metrics.Increment("dong") - metrics.Gauge("dang", 10) - time.Sleep(1000 * time.Millisecond) - } - - log.Println("Flushing any outstanding metrics manually") - metrics.Flush() + logger := log.New(os.Stdout, "", log.LstdFlags) + + logger.Println("Configuring cgm") + + cmc := &cgm.Config{} + cmc.Debug = false // set to true for debug messages + cmc.Log = logger + + // Circonus API Token key (https://login.circonus.com/user/tokens) + cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") + + logger.Println("Creating new cgm instance") + + metrics, err := cgm.NewCirconusMetrics(cmc) + if err != nil { + logger.Println(err) + os.Exit(1) + } + src := rand.NewSource(time.Now().UnixNano()) + rnd := rand.New(src) + + logger.Println("Adding ctrl-c trap") + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + logger.Println("Received CTRL-C, flushing outstanding metrics before exit") + metrics.Flush() + os.Exit(0) + }() + + logger.Println("Starting to send metrics") + + // number of "sets" of metrics to send + max := 60 + + for i := 1; i < max; i++ { + logger.Printf("\tmetric set %d of %d", i, 60) + metrics.Timing("foo", rnd.Float64()*10) + metrics.Increment("bar") + metrics.Gauge("baz", 10) + time.Sleep(time.Second) + } + + metrics.SetText("fini", "complete") + + logger.Println("Flushing any outstanding metrics manually") + metrics.Flush() } ``` -### HTTP Handler wrapping +### A more complete example + +A working, cut-n-paste example with all options available for modification. Also, demonstrates metric tagging. 
+ +```go +package main + +import ( + "log" + "math/rand" + "os" + "os/signal" + "syscall" + "time" + + cgm "github.com/circonus-labs/circonus-gometrics" +) + +func main() { + logger := log.New(os.Stdout, "", log.LstdFlags) + + logger.Println("Configuring cgm") + + cmc := &cgm.Config{} + + // General + + cmc.Interval = "10s" + cmc.Log = logger + cmc.Debug = false + cmc.ResetCounters = "true" + cmc.ResetGauges = "true" + cmc.ResetHistograms = "true" + cmc.ResetText = "true" + + // Circonus API configuration options + cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") + cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") + cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") + + // Check configuration options + cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL") + cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID") + cmc.CheckManager.Check.InstanceID = "" + cmc.CheckManager.Check.DisplayName = "" + cmc.CheckManager.Check.TargetHost = "" + // if hn, err := os.Hostname(); err == nil { + // cmc.CheckManager.Check.TargetHost = hn + // } + cmc.CheckManager.Check.SearchTag = "" + cmc.CheckManager.Check.Secret = "" + cmc.CheckManager.Check.Tags = "" + cmc.CheckManager.Check.MaxURLAge = "5m" + cmc.CheckManager.Check.ForceMetricActivation = "false" + + // Broker configuration options + cmc.CheckManager.Broker.ID = "" + cmc.CheckManager.Broker.SelectTag = "" + cmc.CheckManager.Broker.MaxResponseTime = "500ms" + + logger.Println("Creating new cgm instance") + + metrics, err := cgm.NewCirconusMetrics(cmc) + if err != nil { + logger.Println(err) + os.Exit(1) + } + + src := rand.NewSource(time.Now().UnixNano()) + rnd := rand.New(src) + + logger.Println("Adding ctrl-c trap") + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + logger.Println("Received CTRL-C, flushing outstanding metrics before exit") + metrics.Flush() + os.Exit(0) + }() + + // Add metric tags (append to any existing tags on specified metric) + metrics.AddMetricTags("foo", []string{"cgm:test"}) + metrics.AddMetricTags("baz", []string{"cgm:test"}) + + logger.Println("Starting to send metrics") + + // number of "sets" of metrics to send + max := 60 + + for i := 1; i < max; i++ { + logger.Printf("\tmetric set %d of %d", i, 60) + + metrics.Timing("foo", rnd.Float64()*10) + metrics.Increment("bar") + metrics.Gauge("baz", 10) + + if i == 35 { + // Set metric tags (overwrite current tags on specified metric) + metrics.SetMetricTags("baz", []string{"cgm:reset_test", "cgm:test2"}) + } + + time.Sleep(time.Second) + } + + logger.Println("Flushing any outstanding metrics manually") + metrics.Flush() + +} ``` + +### HTTP Handler wrapping + +```go http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func)) ``` ### HTTP latency example -``` +```go package main import ( @@ -168,7 +220,6 @@ func main() { if err != nil { panic(err) } - metrics.Start() http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:]) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md new file mode 100644 index 000000000..8f286b79f --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md @@ -0,0 +1,163 @@ +## Circonus API package + +Full api documentation (for using *this* package) is available at 
[godoc.org](https://godoc.org/github.com/circonus-labs/circonus-gometrics/api). Links in the lists below go directly to the generic Circonus API documentation for the endpoint. + +### Straight [raw] API access + +* Get +* Post (for creates) +* Put (for updates) +* Delete + +### Helpers for currently supported API endpoints + +> Note, these interfaces are still being actively developed. For example, many of the `New*` methods only return an empty struct; sensible defaults will be added going forward. Other, common helper methods for the various endpoints may be added as use cases emerge. The organization +of the API may change if common use contexts would benefit significantly. + +* [Account](https://login.circonus.com/resources/api/calls/account) + * FetchAccount + * FetchAccounts + * UpdateAccount + * SearchAccounts +* [Acknowledgement](https://login.circonus.com/resources/api/calls/acknowledgement) + * NewAcknowledgement + * FetchAcknowledgement + * FetchAcknowledgements + * UpdateAcknowledgement + * CreateAcknowledgement + * DeleteAcknowledgement + * DeleteAcknowledgementByCID + * SearchAcknowledgements +* [Alert](https://login.circonus.com/resources/api/calls/alert) + * FetchAlert + * FetchAlerts + * SearchAlerts +* [Annotation](https://login.circonus.com/resources/api/calls/annotation) + * NewAnnotation + * FetchAnnotation + * FetchAnnotations + * UpdateAnnotation + * CreateAnnotation + * DeleteAnnotation + * DeleteAnnotationByCID + * SearchAnnotations +* [Broker](https://login.circonus.com/resources/api/calls/broker) + * FetchBroker + * FetchBrokers + * SearchBrokers +* [Check Bundle](https://login.circonus.com/resources/api/calls/check_bundle) + * NewCheckBundle + * FetchCheckBundle + * FetchCheckBundles + * UpdateCheckBundle + * CreateCheckBundle + * DeleteCheckBundle + * DeleteCheckBundleByCID + * SearchCheckBundles +* [Check Bundle Metrics](https://login.circonus.com/resources/api/calls/check_bundle_metrics) + * FetchCheckBundleMetrics + * UpdateCheckBundleMetrics +* [Check](https://login.circonus.com/resources/api/calls/check) + * FetchCheck + * FetchChecks + * SearchChecks +* [Contact Group](https://login.circonus.com/resources/api/calls/contact_group) + * NewContactGroup + * FetchContactGroup + * FetchContactGroups + * UpdateContactGroup + * CreateContactGroup + * DeleteContactGroup + * DeleteContactGroupByCID + * SearchContactGroups +* [Dashboard](https://login.circonus.com/resources/api/calls/dashboard) -- note, this is a work in progress, the methods/types may still change + * NewDashboard + * FetchDashboard + * FetchDashboards + * UpdateDashboard + * CreateDashboard + * DeleteDashboard + * DeleteDashboardByCID + * SearchDashboards +* [Graph](https://login.circonus.com/resources/api/calls/graph) + * NewGraph + * FetchGraph + * FetchGraphs + * UpdateGraph + * CreateGraph + * DeleteGraph + * DeleteGraphByCID + * SearchGraphs +* [Metric Cluster](https://login.circonus.com/resources/api/calls/metric_cluster) + * NewMetricCluster + * FetchMetricCluster + * FetchMetricClusters + * UpdateMetricCluster + * CreateMetricCluster + * DeleteMetricCluster + * DeleteMetricClusterByCID + * SearchMetricClusters +* [Metric](https://login.circonus.com/resources/api/calls/metric) + * FetchMetric + * FetchMetrics + * UpdateMetric + * SearchMetrics +* [Maintenance window](https://login.circonus.com/resources/api/calls/maintenance) + * NewMaintenanceWindow + * FetchMaintenanceWindow + * FetchMaintenanceWindows + * UpdateMaintenanceWindow + * CreateMaintenanceWindow + * DeleteMaintenanceWindow + * 
DeleteMaintenanceWindowByCID
+  * SearchMaintenanceWindows
+* [Outlier Report](https://login.circonus.com/resources/api/calls/outlier_report)
+  * NewOutlierReport
+  * FetchOutlierReport
+  * FetchOutlierReports
+  * UpdateOutlierReport
+  * CreateOutlierReport
+  * DeleteOutlierReport
+  * DeleteOutlierReportByCID
+  * SearchOutlierReports
+* [Provision Broker](https://login.circonus.com/resources/api/calls/provision_broker)
+  * NewProvisionBroker
+  * FetchProvisionBroker
+  * UpdateProvisionBroker
+  * CreateProvisionBroker
+* [Rule Set](https://login.circonus.com/resources/api/calls/rule_set)
+  * NewRuleset
+  * FetchRuleset
+  * FetchRulesets
+  * UpdateRuleset
+  * CreateRuleset
+  * DeleteRuleset
+  * DeleteRulesetByCID
+  * SearchRulesets
+* [Rule Set Group](https://login.circonus.com/resources/api/calls/rule_set_group)
+  * NewRulesetGroup
+  * FetchRulesetGroup
+  * FetchRulesetGroups
+  * UpdateRulesetGroup
+  * CreateRulesetGroup
+  * DeleteRulesetGroup
+  * DeleteRulesetGroupByCID
+  * SearchRulesetGroups
+* [User](https://login.circonus.com/resources/api/calls/user)
+  * FetchUser
+  * FetchUsers
+  * UpdateUser
+  * SearchUsers
+* [Worksheet](https://login.circonus.com/resources/api/calls/worksheet)
+  * NewWorksheet
+  * FetchWorksheet
+  * FetchWorksheets
+  * UpdateWorksheet
+  * CreateWorksheet
+  * DeleteWorksheet
+  * DeleteWorksheetByCID
+  * SearchWorksheets
+
+---
+
+Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go
new file mode 100644
index 000000000..dd8ff577d
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go
@@ -0,0 +1,181 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Account API support - Fetch and Update
+// See: https://login.circonus.com/resources/api/calls/account
+// Note: Create and Delete are not supported for Accounts via the API
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// AccountLimit defines a usage limit imposed on an account
+type AccountLimit struct {
+	Limit uint   `json:"_limit,omitempty"` // uint >=0
+	Type  string `json:"_type,omitempty"`  // string
+	Used  uint   `json:"_used,omitempty"`  // uint >=0
+}
+
+// AccountInvite defines outstanding invites
+type AccountInvite struct {
+	Email string `json:"email"` // string
+	Role  string `json:"role"`  // string
+}
+
+// AccountUser defines current users
+type AccountUser struct {
+	Role    string `json:"role"` // string
+	UserCID string `json:"user"` // string
+}
+
+// Account defines an account. See https://login.circonus.com/resources/api/calls/account for more information.
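+//
+// A usage sketch (assumes a configured *API named client; a nil cid fetches
+// '/account/current'):
+//
+//	acct, err := client.FetchAccount(nil)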
+type Account struct {
+	Address1      *string         `json:"address1,omitempty"`        // string or null
+	Address2      *string         `json:"address2,omitempty"`        // string or null
+	CCEmail       *string         `json:"cc_email,omitempty"`        // string or null
+	CID           string          `json:"_cid,omitempty"`            // string
+	City          *string         `json:"city,omitempty"`            // string or null
+	ContactGroups []string        `json:"_contact_groups,omitempty"` // [] len >= 0
+	Country       string          `json:"country_code,omitempty"`    // string
+	Description   *string         `json:"description,omitempty"`     // string or null
+	Invites       []AccountInvite `json:"invites,omitempty"`         // [] len >= 0
+	Name          string          `json:"name,omitempty"`            // string
+	OwnerCID      string          `json:"_owner,omitempty"`          // string
+	StateProv     *string         `json:"state_prov,omitempty"`      // string or null
+	Timezone      string          `json:"timezone,omitempty"`        // string
+	UIBaseURL     string          `json:"_ui_base_url,omitempty"`    // string
+	Usage         []AccountLimit  `json:"_usage,omitempty"`          // [] len >= 0
+	Users         []AccountUser   `json:"users,omitempty"`           // [] len >= 0
+}
+
+// FetchAccount retrieves account with passed cid. Pass nil for '/account/current'.
+func (a *API) FetchAccount(cid CIDType) (*Account, error) {
+	var accountCID string
+
+	if cid == nil || *cid == "" {
+		accountCID = config.AccountPrefix + "/current"
+	} else {
+		accountCID = string(*cid)
+	}
+
+	matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
+	}
+
+	result, err := a.Get(accountCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] account fetch, received JSON: %s", string(result))
+	}
+
+	account := new(Account)
+	if err := json.Unmarshal(result, account); err != nil {
+		return nil, err
+	}
+
+	return account, nil
+}
+
+// FetchAccounts retrieves all accounts available to the API Token.
+func (a *API) FetchAccounts() (*[]Account, error) {
+	result, err := a.Get(config.AccountPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var accounts []Account
+	if err := json.Unmarshal(result, &accounts); err != nil {
+		return nil, err
+	}
+
+	return &accounts, nil
+}
+
+// UpdateAccount updates passed account.
+func (a *API) UpdateAccount(cfg *Account) (*Account, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid account config [nil]")
+	}
+
+	accountCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] account update, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(accountCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	account := &Account{}
+	if err := json.Unmarshal(result, account); err != nil {
+		return nil, err
+	}
+
+	return account, nil
+}
+
+// SearchAccounts returns accounts matching a filter (search queries are not
+// supported by the account endpoint). Pass nil as filter for all accounts the
+// API Token can access.
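+//
+// A usage sketch (assumes a configured *API named client; the filter key is
+// hypothetical):
+//
+//	filter := SearchFilterType{"f_name_wildcard": {"*ops*"}}
+//	accounts, err := client.SearchAccounts(&filter)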
+func (a *API) SearchAccounts(filterCriteria *SearchFilterType) (*[]Account, error) {
+	q := url.Values{}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchAccounts()
+	}
+
+	reqURL := url.URL{
+		Path:     config.AccountPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var accounts []Account
+	if err := json.Unmarshal(result, &accounts); err != nil {
+		return nil, err
+	}
+
+	return &accounts, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go
new file mode 100644
index 000000000..f6da51d4d
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go
@@ -0,0 +1,190 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Acknowledgement API support - Fetch, Create, Update, Delete*, and Search
+// See: https://login.circonus.com/resources/api/calls/acknowledgement
+// * : delete (cancel) by updating with AcknowledgedUntil set to 0
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// Acknowledgement defines an acknowledgement. See https://login.circonus.com/resources/api/calls/acknowledgement for more information.
+type Acknowledgement struct {
+	AcknowledgedBy    string      `json:"_acknowledged_by,omitempty"`   // string
+	AcknowledgedOn    uint        `json:"_acknowledged_on,omitempty"`   // uint
+	AcknowledgedUntil interface{} `json:"acknowledged_until,omitempty"` // NOTE received as uint; can be set using string or uint
+	Active            bool        `json:"_active,omitempty"`            // bool
+	AlertCID          string      `json:"alert,omitempty"`              // string
+	CID               string      `json:"_cid,omitempty"`               // string
+	LastModified      uint        `json:"_last_modified,omitempty"`     // uint
+	LastModifiedBy    string      `json:"_last_modified_by,omitempty"`  // string
+	Notes             string      `json:"notes,omitempty"`              // string
+}
+
+// NewAcknowledgement returns a new Acknowledgement (with defaults, if applicable).
+func NewAcknowledgement() *Acknowledgement {
+	return &Acknowledgement{}
+}
+
+// FetchAcknowledgement retrieves acknowledgement with passed cid.
+func (a *API) FetchAcknowledgement(cid CIDType) (*Acknowledgement, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid acknowledgement CID [none]")
+	}
+
+	acknowledgementCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID)
+	}
+
+	result, err := a.Get(acknowledgementCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] acknowledgement fetch, received JSON: %s", string(result))
+	}
+
+	acknowledgement := &Acknowledgement{}
+	if err := json.Unmarshal(result, acknowledgement); err != nil {
+		return nil, err
+	}
+
+	return acknowledgement, nil
+}
+
+// FetchAcknowledgements retrieves all acknowledgements available to the API Token.
+func (a *API) FetchAcknowledgements() (*[]Acknowledgement, error) { + result, err := a.Get(config.AcknowledgementPrefix) + if err != nil { + return nil, err + } + + var acknowledgements []Acknowledgement + if err := json.Unmarshal(result, &acknowledgements); err != nil { + return nil, err + } + + return &acknowledgements, nil +} + +// UpdateAcknowledgement updates passed acknowledgement. +func (a *API) UpdateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid acknowledgement config [nil]") + } + + acknowledgementCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] acknowledgement update, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(acknowledgementCID, jsonCfg) + if err != nil { + return nil, err + } + + acknowledgement := &Acknowledgement{} + if err := json.Unmarshal(result, acknowledgement); err != nil { + return nil, err + } + + return acknowledgement, nil +} + +// CreateAcknowledgement creates a new acknowledgement. +func (a *API) CreateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid acknowledgement config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + result, err := a.Post(config.AcknowledgementPrefix, jsonCfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] acknowledgement create, sending JSON: %s", string(jsonCfg)) + } + + acknowledgement := &Acknowledgement{} + if err := json.Unmarshal(result, acknowledgement); err != nil { + return nil, err + } + + return acknowledgement, nil +} + +// SearchAcknowledgements returns acknowledgements matching +// the specified search query and/or filter. If nil is passed for +// both parameters all acknowledgements will be returned. +func (a *API) SearchAcknowledgements(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Acknowledgement, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchAcknowledgements() + } + + reqURL := url.URL{ + Path: config.AcknowledgementPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var acknowledgements []Acknowledgement + if err := json.Unmarshal(result, &acknowledgements); err != nil { + return nil, err + } + + return &acknowledgements, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go new file mode 100644 index 000000000..a242d3d85 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go @@ -0,0 +1,131 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Alert API support - Fetch and Search
+// See: https://login.circonus.com/resources/api/calls/alert
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// Alert defines an alert. See https://login.circonus.com/resources/api/calls/alert for more information.
+type Alert struct {
+	AcknowledgementCID *string  `json:"_acknowledgement,omitempty"` // string or null
+	AlertURL           string   `json:"_alert_url,omitempty"`       // string
+	BrokerCID          string   `json:"_broker,omitempty"`          // string
+	CheckCID           string   `json:"_check,omitempty"`           // string
+	CheckName          string   `json:"_check_name,omitempty"`      // string
+	CID                string   `json:"_cid,omitempty"`             // string
+	ClearedOn          *uint    `json:"_cleared_on,omitempty"`      // uint or null
+	ClearedValue       *string  `json:"_cleared_value,omitempty"`   // string or null
+	Maintenance        []string `json:"_maintenance,omitempty"`     // [] len >= 0
+	MetricLinkURL      *string  `json:"_metric_link,omitempty"`     // string or null
+	MetricName         string   `json:"_metric_name,omitempty"`     // string
+	MetricNotes        *string  `json:"_metric_notes,omitempty"`    // string or null
+	OccurredOn         uint     `json:"_occurred_on,omitempty"`     // uint
+	RuleSetCID         string   `json:"_rule_set,omitempty"`        // string
+	Severity           uint     `json:"_severity,omitempty"`        // uint
+	Tags               []string `json:"_tags,omitempty"`            // [] len >= 0
+	Value              string   `json:"_value,omitempty"`           // string
+}
+
+// NewAlert returns a new alert (with defaults, if applicable)
+func NewAlert() *Alert {
+	return &Alert{}
+}
+
+// FetchAlert retrieves alert with passed cid.
+func (a *API) FetchAlert(cid CIDType) (*Alert, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid alert CID [none]")
+	}
+
+	alertCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.AlertCIDRegex, alertCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid alert CID [%s]", alertCID)
+	}
+
+	result, err := a.Get(alertCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch alert, received JSON: %s", string(result))
+	}
+
+	alert := &Alert{}
+	if err := json.Unmarshal(result, alert); err != nil {
+		return nil, err
+	}
+
+	return alert, nil
+}
+
+// FetchAlerts retrieves all alerts available to the API Token.
+func (a *API) FetchAlerts() (*[]Alert, error) {
+	result, err := a.Get(config.AlertPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var alerts []Alert
+	if err := json.Unmarshal(result, &alerts); err != nil {
+		return nil, err
+	}
+
+	return &alerts, nil
+}
+
+// SearchAlerts returns alerts matching the specified search query
+// and/or filter. If nil is passed for both parameters all alerts
+// will be returned.
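+//
+// A usage sketch (assumes a configured *API named client; the query syntax is
+// hypothetical):
+//
+//	query := SearchQueryType("(active:1)")
+//	alerts, err := client.SearchAlerts(&query, nil)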
+func (a *API) SearchAlerts(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Alert, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchAlerts()
+	}
+
+	reqURL := url.URL{
+		Path:     config.AlertPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var alerts []Alert
+	if err := json.Unmarshal(result, &alerts); err != nil {
+		return nil, err
+	}
+
+	return &alerts, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go
new file mode 100644
index 000000000..589ec6da9
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go
@@ -0,0 +1,223 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Annotation API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/annotation
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// Annotation defines an annotation. See https://login.circonus.com/resources/api/calls/annotation for more information.
+type Annotation struct {
+	Category       string   `json:"category"`                    // string
+	CID            string   `json:"_cid,omitempty"`              // string
+	Created        uint     `json:"_created,omitempty"`          // uint
+	Description    string   `json:"description"`                 // string
+	LastModified   uint     `json:"_last_modified,omitempty"`    // uint
+	LastModifiedBy string   `json:"_last_modified_by,omitempty"` // string
+	RelatedMetrics []string `json:"rel_metrics"`                 // [] len >= 0
+	Start          uint     `json:"start"`                       // uint
+	Stop           uint     `json:"stop"`                        // uint
+	Title          string   `json:"title"`                       // string
+}
+
+// NewAnnotation returns a new Annotation (with defaults, if applicable)
+func NewAnnotation() *Annotation {
+	return &Annotation{}
+}
+
+// FetchAnnotation retrieves annotation with passed cid.
+func (a *API) FetchAnnotation(cid CIDType) (*Annotation, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid annotation CID [none]")
+	}
+
+	annotationCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
+	}
+
+	result, err := a.Get(annotationCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch annotation, received JSON: %s", string(result))
+	}
+
+	annotation := &Annotation{}
+	if err := json.Unmarshal(result, annotation); err != nil {
+		return nil, err
+	}
+
+	return annotation, nil
+}
+
+// FetchAnnotations retrieves all annotations available to the API Token.
+func (a *API) FetchAnnotations() (*[]Annotation, error) { + result, err := a.Get(config.AnnotationPrefix) + if err != nil { + return nil, err + } + + var annotations []Annotation + if err := json.Unmarshal(result, &annotations); err != nil { + return nil, err + } + + return &annotations, nil +} + +// UpdateAnnotation updates passed annotation. +func (a *API) UpdateAnnotation(cfg *Annotation) (*Annotation, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid annotation config [nil]") + } + + annotationCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update annotation, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(annotationCID, jsonCfg) + if err != nil { + return nil, err + } + + annotation := &Annotation{} + if err := json.Unmarshal(result, annotation); err != nil { + return nil, err + } + + return annotation, nil +} + +// CreateAnnotation creates a new annotation. +func (a *API) CreateAnnotation(cfg *Annotation) (*Annotation, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid annotation config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.AnnotationPrefix, jsonCfg) + if err != nil { + return nil, err + } + + annotation := &Annotation{} + if err := json.Unmarshal(result, annotation); err != nil { + return nil, err + } + + return annotation, nil +} + +// DeleteAnnotation deletes passed annotation. +func (a *API) DeleteAnnotation(cfg *Annotation) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid annotation config [nil]") + } + + return a.DeleteAnnotationByCID(CIDType(&cfg.CID)) +} + +// DeleteAnnotationByCID deletes annotation with passed cid. +func (a *API) DeleteAnnotationByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid annotation CID [none]") + } + + annotationCID := string(*cid) + + matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid annotation CID [%s]", annotationCID) + } + + _, err = a.Delete(annotationCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchAnnotations returns annotations matching the specified +// search query and/or filter. If nil is passed for both parameters +// all annotations will be returned. 
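+//
+// A usage sketch (assumes a configured *API named client; the filter key is
+// hypothetical):
+//
+//	filter := SearchFilterType{"f_category": {"deploys"}}
+//	annotations, err := client.SearchAnnotations(nil, &filter)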
+func (a *API) SearchAnnotations(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Annotation, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchAnnotations() + } + + reqURL := url.URL{ + Path: config.AnnotationPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var annotations []Annotation + if err := json.Unmarshal(result, &annotations); err != nil { + return nil, err + } + + return &annotations, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go index 7a7f18708..598e20f2d 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go @@ -2,31 +2,48 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package api provides methods for interacting with the Circonus API package api import ( "bytes" + crand "crypto/rand" + "crypto/tls" + "crypto/x509" "errors" "fmt" "io/ioutil" "log" + "math" + "math/big" + "math/rand" + "net" "net/http" "net/url" "os" + "regexp" "strings" + "sync" "time" "github.com/hashicorp/go-retryablehttp" ) +func init() { + n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + rand.Seed(time.Now().UTC().UnixNano()) + return + } + rand.Seed(n.Int64()) +} + const ( // a few sensible defaults defaultAPIURL = "https://api.circonus.com/v2" defaultAPIApp = "circonus-gometrics" - minRetryWait = 10 * time.Millisecond - maxRetryWait = 50 * time.Millisecond - maxRetries = 3 + minRetryWait = 1 * time.Second + maxRetryWait = 15 * time.Second + maxRetries = 4 // equating to 1 + maxRetries total attempts ) // TokenKeyType - Circonus API Token key @@ -35,41 +52,63 @@ type TokenKeyType string // TokenAppType - Circonus API Token app name type TokenAppType string -// IDType Circonus object id (numeric portion of cid) -type IDType int +// TokenAccountIDType - Circonus API Token account id +type TokenAccountIDType string // CIDType Circonus object cid -type CIDType string +type CIDType *string + +// IDType Circonus object id +type IDType int // URLType submission url type type URLType string -// SearchQueryType search query +// SearchQueryType search query (see: https://login.circonus.com/resources/api#searching) type SearchQueryType string -// SearchTagType search/select tag type -type SearchTagType string +// SearchFilterType search filter (see: https://login.circonus.com/resources/api#filtering) +type SearchFilterType map[string][]string + +// TagType search/select/custom tag(s) type +type TagType []string // Config options for Circonus API type Config struct { - URL string - TokenKey string - TokenApp string - Log *log.Logger - Debug bool + URL string + TokenKey string + TokenApp string + TokenAccountID string + CACert *x509.CertPool + Log *log.Logger + Debug bool } // API Circonus API type API struct { - apiURL *url.URL - key TokenKeyType - app TokenAppType - Debug bool - Log *log.Logger + apiURL *url.URL + key TokenKeyType + app TokenAppType + accountID TokenAccountIDType + caCert *x509.CertPool + Debug 
bool + Log *log.Logger + useExponentialBackoff bool + useExponentialBackoffmu sync.Mutex +} + +// NewClient returns a new Circonus API (alias for New) +func NewClient(ac *Config) (*API, error) { + return New(ac) } -// NewAPI returns a new Circonus API +// NewAPI returns a new Circonus API (alias for New) func NewAPI(ac *Config) (*API, error) { + return New(ac) +} + +// New returns a new Circonus API +func New(ac *Config) (*API, error) { if ac == nil { return nil, errors.New("Invalid API configuration (nil)") @@ -85,6 +124,8 @@ func NewAPI(ac *Config) (*API, error) { app = defaultAPIApp } + acctID := TokenAccountIDType(ac.TokenAccountID) + au := string(ac.URL) if au == "" { au = defaultAPIURL @@ -94,6 +135,7 @@ func NewAPI(ac *Config) (*API, error) { au = fmt.Sprintf("https://%s/v2", ac.URL) } if last := len(au) - 1; last >= 0 && au[last] == '/' { + // strip off trailing '/' au = au[:last] } apiURL, err := url.Parse(au) @@ -101,61 +143,127 @@ func NewAPI(ac *Config) (*API, error) { return nil, err } - a := &API{apiURL, key, app, ac.Debug, ac.Log} + a := &API{ + apiURL: apiURL, + key: key, + app: app, + accountID: acctID, + caCert: ac.CACert, + Debug: ac.Debug, + Log: ac.Log, + useExponentialBackoff: false, + } + a.Debug = ac.Debug + a.Log = ac.Log + if a.Debug && a.Log == nil { + a.Log = log.New(os.Stderr, "", log.LstdFlags) + } if a.Log == nil { - if a.Debug { - a.Log = log.New(os.Stderr, "", log.LstdFlags) - } else { - a.Log = log.New(ioutil.Discard, "", log.LstdFlags) - } + a.Log = log.New(ioutil.Discard, "", log.LstdFlags) } return a, nil } +// EnableExponentialBackoff enables use of exponential backoff for next API call(s) +// and use exponential backoff for all API calls until exponential backoff is disabled. +func (a *API) EnableExponentialBackoff() { + a.useExponentialBackoffmu.Lock() + a.useExponentialBackoff = true + a.useExponentialBackoffmu.Unlock() +} + +// DisableExponentialBackoff disables use of exponential backoff. If a request using +// exponential backoff is currently running, it will stop using exponential backoff +// on its next iteration (if needed). 
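+//
+// A usage sketch of the backoff toggles (assumes a configured *API named
+// client; the request path is illustrative):
+//
+//	client.EnableExponentialBackoff()
+//	result, err := client.Get("/check_bundle") // retried with exponential backoff on failure
+//	client.DisableExponentialBackoff()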
+func (a *API) DisableExponentialBackoff() { + a.useExponentialBackoffmu.Lock() + a.useExponentialBackoff = false + a.useExponentialBackoffmu.Unlock() +} + // Get API request func (a *API) Get(reqPath string) ([]byte, error) { - return a.apiCall("GET", reqPath, nil) + return a.apiRequest("GET", reqPath, nil) } // Delete API request func (a *API) Delete(reqPath string) ([]byte, error) { - return a.apiCall("DELETE", reqPath, nil) + return a.apiRequest("DELETE", reqPath, nil) } // Post API request func (a *API) Post(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("POST", reqPath, data) + return a.apiRequest("POST", reqPath, data) } // Put API request func (a *API) Put(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("PUT", reqPath, data) + return a.apiRequest("PUT", reqPath, data) +} + +func backoff(interval uint) float64 { + return math.Floor(((float64(interval) * (1 + rand.Float64())) / 2) + .5) +} + +// apiRequest manages retry strategy for exponential backoffs +func (a *API) apiRequest(reqMethod string, reqPath string, data []byte) ([]byte, error) { + backoffs := []uint{2, 4, 8, 16, 32} + attempts := 0 + success := false + + var result []byte + var err error + + for !success { + result, err = a.apiCall(reqMethod, reqPath, data) + if err == nil { + success = true + } + + // break and return error if not using exponential backoff + if err != nil { + if !a.useExponentialBackoff { + break + } + if matched, _ := regexp.MatchString("code 403", err.Error()); matched { + break + } + } + + if !success { + var wait float64 + if attempts >= len(backoffs) { + wait = backoff(backoffs[len(backoffs)-1]) + } else { + wait = backoff(backoffs[attempts]) + } + attempts++ + a.Log.Printf("[WARN] API call failed %s, retrying in %d seconds.\n", err.Error(), uint(wait)) + time.Sleep(time.Duration(wait) * time.Second) + } + } + + return result, err } // apiCall call Circonus API func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) { - dataReader := bytes.NewReader(data) reqURL := a.apiURL.String() + if reqPath == "" { + return nil, errors.New("Invalid URL path") + } if reqPath[:1] != "/" { reqURL += "/" } - if reqPath[:3] == "/v2" { - reqURL += reqPath[3:len(reqPath)] + if len(reqPath) >= 3 && reqPath[:3] == "/v2" { + reqURL += reqPath[3:] } else { reqURL += reqPath } - req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) - if err != nil { - return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) - } - req.Header.Add("Accept", "application/json") - req.Header.Add("X-Circonus-Auth-Token", string(a.key)) - req.Header.Add("X-Circonus-App-Name", string(a.app)) - // keep last HTTP error in the event of retry failure var lastHTTPError error retryPolicy := func(resp *http.Response, err error) (bool, error) { @@ -167,38 +275,98 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er // the server time to recover, as 500's are typically not permanent // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. - if resp.StatusCode == 0 || resp.StatusCode >= 500 { - defer resp.Body.Close() + // Retry on 429 (rate limit) as well. + if resp.StatusCode == 0 || // wtf?! 
+ resp.StatusCode >= 500 || // rutroh + resp.StatusCode == 429 { // rate limit body, readErr := ioutil.ReadAll(resp.Body) if readErr != nil { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr) + lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, readErr.Error()) } else { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body)) + lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, strings.TrimSpace(string(body))) } return true, nil } return false, nil } + dataReader := bytes.NewReader(data) + + req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) + if err != nil { + return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) + } + req.Header.Add("Accept", "application/json") + req.Header.Add("X-Circonus-Auth-Token", string(a.key)) + req.Header.Add("X-Circonus-App-Name", string(a.app)) + if string(a.accountID) != "" { + req.Header.Add("X-Circonus-Account-ID", string(a.accountID)) + } + client := retryablehttp.NewClient() - client.RetryWaitMin = minRetryWait - client.RetryWaitMax = maxRetryWait - client.RetryMax = maxRetries - client.Logger = a.Log + if a.apiURL.Scheme == "https" && a.caCert != nil { + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &tls.Config{RootCAs: a.caCert}, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } else { + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } + + a.useExponentialBackoffmu.Lock() + eb := a.useExponentialBackoff + a.useExponentialBackoffmu.Unlock() + + if eb { + // limit to one request if using exponential backoff + client.RetryWaitMin = 1 + client.RetryWaitMax = 2 + client.RetryMax = 0 + } else { + client.RetryWaitMin = minRetryWait + client.RetryWaitMax = maxRetryWait + client.RetryMax = maxRetries + } + + // retryablehttp only groks log or no log + if a.Debug { + client.Logger = a.Log + } else { + client.Logger = log.New(ioutil.Discard, "", log.LstdFlags) + } + client.CheckRetry = retryPolicy resp, err := client.Do(req) if err != nil { if lastHTTPError != nil { - return nil, fmt.Errorf("[ERROR] fetching: %+v %+v", err, lastHTTPError) + return nil, lastHTTPError } - return nil, fmt.Errorf("[ERROR] fetching %s: %+v", reqURL, err) + return nil, fmt.Errorf("[ERROR] %s: %+v", reqURL, err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("[ERROR] reading body %+v", err) + return nil, fmt.Errorf("[ERROR] reading response %+v", err) } if resp.StatusCode < 200 || resp.StatusCode >= 300 { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go index 87e2b5e37..459fda6df 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go @@ -2,49 +2,70 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// Broker API support - Fetch and Search +// See: https://login.circonus.com/resources/api/calls/broker + package api import ( "encoding/json" "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" ) -// BrokerDetail instance attributes +// BrokerDetail defines instance attributes type BrokerDetail struct { - CN string `json:"cn"` - IP string `json:"ipaddress"` - MinVer int `json:"minimum_version_required"` - Modules []string `json:"modules"` - Port int `json:"port"` - Skew string `json:"skew"` - Status string `json:"status"` - Version int `json:"version"` + CN string `json:"cn"` // string + ExternalHost *string `json:"external_host"` // string or null + ExternalPort uint16 `json:"external_port"` // uint16 + IP *string `json:"ipaddress"` // string or null + MinVer uint `json:"minimum_version_required"` // uint + Modules []string `json:"modules"` // [] len >= 0 + Port *uint16 `json:"port"` // uint16 or null + Skew *string `json:"skew"` // BUG doc: floating point number, api object: string or null + Status string `json:"status"` // string + Version *uint `json:"version"` // uint or null } -// Broker definition +// Broker defines a broker. See https://login.circonus.com/resources/api/calls/broker for more information. type Broker struct { - Cid string `json:"_cid"` - Details []BrokerDetail `json:"_details"` - Latitude string `json:"_latitude"` - Longitude string `json:"_longitude"` - Name string `json:"_name"` - Tags []string `json:"_tags"` - Type string `json:"_type"` + CID string `json:"_cid"` // string + Details []BrokerDetail `json:"_details"` // [] len >= 1 + Latitude *string `json:"_latitude"` // string or null + Longitude *string `json:"_longitude"` // string or null + Name string `json:"_name"` // string + Tags []string `json:"_tags"` // [] len >= 0 + Type string `json:"_type"` // string } -// FetchBrokerByID fetch a broker configuration by [group]id -func (a *API) FetchBrokerByID(id IDType) (*Broker, error) { - cid := CIDType(fmt.Sprintf("/broker/%d", id)) - return a.FetchBrokerByCID(cid) -} +// FetchBroker retrieves broker with passed cid. +func (a *API) FetchBroker(cid CIDType) (*Broker, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid broker CID [none]") + } + + brokerCID := string(*cid) + + matched, err := regexp.MatchString(config.BrokerCIDRegex, brokerCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid broker CID [%s]", brokerCID) + } -// FetchBrokerByCID fetch a broker configuration by cid -func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { - result, err := a.Get(string(cid)) + result, err := a.Get(brokerCID) if err != nil { return nil, err } + if a.Debug { + a.Log.Printf("[DEBUG] fetch broker, received JSON: %s", string(result)) + } + response := new(Broker) if err := json.Unmarshal(result, &response); err != nil { return nil, err @@ -54,36 +75,57 @@ func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { } -// FetchBrokerListByTag return list of brokers with a specific tag -func (a *API) FetchBrokerListByTag(searchTag SearchTagType) ([]Broker, error) { - query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", searchTag)) - return a.BrokerSearch(query) -} - -// BrokerSearch return a list of brokers matching a query/filter -func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) { - queryURL := fmt.Sprintf("/broker?%s", string(query)) - - result, err := a.Get(queryURL) +// FetchBrokers returns all brokers available to the API Token. 
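+//
+// Illustrative usage sketch (not part of the API; assumes a hypothetical,
+// already-configured client named "client"):
+//
+//	brokers, err := client.FetchBrokers()
+//	if err != nil {
++//		log.Fatal(err) // handle the error appropriately
+//	}
+//	for _, b := range *brokers {
+//		fmt.Printf("%s %s\n", b.CID, b.Name)
+//	}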
+func (a *API) FetchBrokers() (*[]Broker, error) { + result, err := a.Get(config.BrokerPrefix) if err != nil { return nil, err } - var brokers []Broker - json.Unmarshal(result, &brokers) + var response []Broker + if err := json.Unmarshal(result, &response); err != nil { + return nil, err + } - return brokers, nil + return &response, nil } -// FetchBrokerList return list of all brokers available to the api token/app -func (a *API) FetchBrokerList() ([]Broker, error) { - result, err := a.Get("/broker") +// SearchBrokers returns brokers matching the specified search +// query and/or filter. If nil is passed for both parameters +// all brokers will be returned. +func (a *API) SearchBrokers(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Broker, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchBrokers() + } + + reqURL := url.URL{ + Path: config.BrokerPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) if err != nil { - return nil, err + return nil, fmt.Errorf("[ERROR] API call error %+v", err) } - var response []Broker - json.Unmarshal(result, &response) + var brokers []Broker + if err := json.Unmarshal(result, &brokers); err != nil { + return nil, err + } - return response, nil + return &brokers, nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go index df0524f43..047d71935 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go @@ -2,112 +2,118 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Check API support - Fetch and Search +// See: https://login.circonus.com/resources/api/calls/check +// Notes: checks do not directly support create, update, and delete - see check bundle. + package api import ( "encoding/json" "fmt" "net/url" - "strings" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" ) -// CheckDetails is an arbitrary json structure, we would only care about submission_url -type CheckDetails struct { - SubmissionURL string `json:"submission_url"` -} +// CheckDetails contains [undocumented] check type specific information +type CheckDetails map[config.Key]string -// Check definition +// Check defines a check. See https://login.circonus.com/resources/api/calls/check for more information. 
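+//
+// Note: the contents of Details are check type specific. A hedged,
+// illustrative sketch of reading one well-known key (assumes an httptrap
+// check previously obtained via FetchCheck into "check"):
+//
+//	if u, ok := check.Details[config.SubmissionURL]; ok {
+//		fmt.Println("submission URL:", u)
+//	}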
type Check struct { - Cid string `json:"_cid"` - Active bool `json:"_active"` - BrokerCid string `json:"_broker"` - CheckBundleCid string `json:"_check_bundle"` - CheckUUID string `json:"_check_uuid"` - Details CheckDetails `json:"_details"` + Active bool `json:"_active"` // bool + BrokerCID string `json:"_broker"` // string + CheckBundleCID string `json:"_check_bundle"` // string + CheckUUID string `json:"_check_uuid"` // string + CID string `json:"_cid"` // string + Details CheckDetails `json:"_details"` // NOTE contents of details are check type specific, map len >= 0 } -// FetchCheckByID fetch a check configuration by id -func (a *API) FetchCheckByID(id IDType) (*Check, error) { - cid := CIDType(fmt.Sprintf("/check/%d", int(id))) - return a.FetchCheckByCID(cid) -} +// FetchCheck retrieves check with passed cid. +func (a *API) FetchCheck(cid CIDType) (*Check, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid check CID [none]") + } + + checkCID := string(*cid) -// FetchCheckByCID fetch a check configuration by cid -func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) { - result, err := a.Get(string(cid)) + matched, err := regexp.MatchString(config.CheckCIDRegex, checkCID) if err != nil { return nil, err } + if !matched { + return nil, fmt.Errorf("Invalid check CID [%s]", checkCID) + } - check := new(Check) - json.Unmarshal(result, check) - - return check, nil -} - -// FetchCheckBySubmissionURL fetch a check configuration by submission_url -func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) { - - u, err := url.Parse(string(submissionURL)) + result, err := a.Get(checkCID) if err != nil { return nil, err } - // valid trap url: scheme://host[:port]/module/httptrap/UUID/secret - - // does it smell like a valid trap url path - if u.Path[:17] != "/module/httptrap/" { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL) + if a.Debug { + a.Log.Printf("[DEBUG] fetch check, received JSON: %s", string(result)) } - // extract uuid/secret - pathParts := strings.Split(u.Path[17:len(u.Path)], "/") - if len(pathParts) != 2 { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL) + check := new(Check) + if err := json.Unmarshal(result, check); err != nil { + return nil, err } - uuid := pathParts[0] - - query := SearchQueryType(fmt.Sprintf("f__check_uuid=%s", uuid)) + return check, nil +} - checks, err := a.CheckSearch(query) +// FetchChecks retrieves all checks available to the API Token. +func (a *API) FetchChecks() (*[]Check, error) { + result, err := a.Get(config.CheckPrefix) if err != nil { return nil, err } - if len(checks) == 0 { - return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid) + var checks []Check + if err := json.Unmarshal(result, &checks); err != nil { + return nil, err } - numActive := 0 - checkID := -1 + return &checks, nil +} - for idx, check := range checks { - if check.Active { - numActive++ - checkID = idx - } - } +// SearchChecks returns checks matching the specified search query +// and/or filter. If nil is passed for both parameters all checks +// will be returned. 
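+//
+// Illustrative sketch (query and filter values are hypothetical; assumes
+// SearchFilterType is a map of filter parameters to value lists, as its
+// use below suggests):
+//
+//	search := SearchQueryType(`(active:1)`)
+//	filter := SearchFilterType{"f__check_uuid": []string{"<uuid>"}}
+//	checks, err := client.SearchChecks(&search, &filter)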
+func (a *API) SearchChecks(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Check, error) { + q := url.Values{} - if numActive > 1 { - return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid) + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) } - return &checks[checkID], nil + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } -} + if q.Encode() == "" { + return a.FetchChecks() + } -// CheckSearch returns a list of checks matching a query/filter -func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) { - queryURL := fmt.Sprintf("/check?%s", string(query)) + reqURL := url.URL{ + Path: config.CheckPrefix, + RawQuery: q.Encode(), + } - result, err := a.Get(queryURL) + result, err := a.Get(reqURL.String()) if err != nil { return nil, err } var checks []Check - json.Unmarshal(result, &checks) + if err := json.Unmarshal(result, &checks); err != nil { + return nil, err + } - return checks, nil + return &checks, nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go new file mode 100644 index 000000000..c202853c2 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go @@ -0,0 +1,255 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check bundle API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/check_bundle + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// CheckBundleMetric individual metric configuration +type CheckBundleMetric struct { + Name string `json:"name"` // string + Result *string `json:"result,omitempty"` // string or null, NOTE not settable - return/information value only + Status string `json:"status,omitempty"` // string + Tags []string `json:"tags"` // [] len >= 0 + Type string `json:"type"` // string + Units *string `json:"units,omitempty"` // string or null + +} + +// CheckBundleConfig contains the check type specific configuration settings +// as k/v pairs (see https://login.circonus.com/resources/api/calls/check_bundle +// for the specific settings available for each distinct check type) +type CheckBundleConfig map[config.Key]string + +// CheckBundle defines a check bundle. See https://login.circonus.com/resources/api/calls/check_bundle for more information. 
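+//
+// A hedged construction sketch using NewCheckBundle below (all field
+// values are illustrative, not defaults):
+//
+//	cfg := NewCheckBundle()
+//	cfg.DisplayName = "example check"
+//	cfg.Target = "example.com"
+//	cfg.Type = "httptrap"
+//	cfg.Brokers = []string{"/broker/1234"} // hypothetical broker CID
+//	cfg.Metrics = []CheckBundleMetric{}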
+type CheckBundle struct { + Brokers []string `json:"brokers"` // [] len >= 0 + Checks []string `json:"_checks,omitempty"` // [] len >= 0 + CheckUUIDs []string `json:"_check_uuids,omitempty"` // [] len >= 0 + CID string `json:"_cid,omitempty"` // string + Config CheckBundleConfig `json:"config"` // NOTE contents of config are check type specific, map len >= 0 + Created uint `json:"_created,omitempty"` // uint + DisplayName string `json:"display_name"` // string + LastModifedBy string `json:"_last_modifed_by,omitempty"` // string + LastModified uint `json:"_last_modified,omitempty"` // uint + MetricLimit int `json:"metric_limit,omitempty"` // int + Metrics []CheckBundleMetric `json:"metrics"` // [] >= 0 + Notes *string `json:"notes,omitempty"` // string or null + Period uint `json:"period,omitempty"` // uint + ReverseConnectURLs []string `json:"_reverse_connection_urls,omitempty"` // [] len >= 0 + Status string `json:"status,omitempty"` // string + Tags []string `json:"tags,omitempty"` // [] len >= 0 + Target string `json:"target"` // string + Timeout float32 `json:"timeout,omitempty"` // float32 + Type string `json:"type"` // string +} + +// NewCheckBundle returns new CheckBundle (with defaults, if applicable) +func NewCheckBundle() *CheckBundle { + return &CheckBundle{ + Config: make(CheckBundleConfig, config.DefaultConfigOptionsSize), + MetricLimit: config.DefaultCheckBundleMetricLimit, + Period: config.DefaultCheckBundlePeriod, + Timeout: config.DefaultCheckBundleTimeout, + Status: config.DefaultCheckBundleStatus, + } +} + +// FetchCheckBundle retrieves check bundle with passed cid. +func (a *API) FetchCheckBundle(cid CIDType) (*CheckBundle, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid check bundle CID [none]") + } + + bundleCID := string(*cid) + + matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID) + } + + result, err := a.Get(bundleCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch check bundle, received JSON: %s", string(result)) + } + + checkBundle := &CheckBundle{} + if err := json.Unmarshal(result, checkBundle); err != nil { + return nil, err + } + + return checkBundle, nil +} + +// FetchCheckBundles retrieves all check bundles available to the API Token. +func (a *API) FetchCheckBundles() (*[]CheckBundle, error) { + result, err := a.Get(config.CheckBundlePrefix) + if err != nil { + return nil, err + } + + var checkBundles []CheckBundle + if err := json.Unmarshal(result, &checkBundles); err != nil { + return nil, err + } + + return &checkBundles, nil +} + +// UpdateCheckBundle updates passed check bundle. 
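+//
+// Illustrative fetch-modify-update sketch (the CID is hypothetical):
+//
+//	cid := "/check_bundle/1234"
+//	bundle, err := client.FetchCheckBundle(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	bundle.DisplayName = "renamed check"
+//	bundle, err = client.UpdateCheckBundle(bundle)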
+func (a *API) UpdateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle config [nil]") + } + + bundleCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid check bundle CID [%s]", bundleCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update check bundle, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(bundleCID, jsonCfg) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + if err := json.Unmarshal(result, checkBundle); err != nil { + return nil, err + } + + return checkBundle, nil +} + +// CreateCheckBundle creates a new check bundle (check). +func (a *API) CreateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create check bundle, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.CheckBundlePrefix, jsonCfg) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + if err := json.Unmarshal(result, checkBundle); err != nil { + return nil, err + } + + return checkBundle, nil +} + +// DeleteCheckBundle deletes passed check bundle. +func (a *API) DeleteCheckBundle(cfg *CheckBundle) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid check bundle config [nil]") + } + return a.DeleteCheckBundleByCID(CIDType(&cfg.CID)) +} + +// DeleteCheckBundleByCID deletes check bundle with passed cid. +func (a *API) DeleteCheckBundleByCID(cid CIDType) (bool, error) { + + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid check bundle CID [none]") + } + + bundleCID := string(*cid) + + matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID) + } + + _, err = a.Delete(bundleCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchCheckBundles returns check bundles matching the specified +// search query and/or filter. If nil is passed for both parameters +// all check bundles will be returned. 
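+//
+// Illustrative sketch (criteria are hypothetical; note that, unlike the
+// other search methods, this one takes a *map[string][]string rather
+// than a *SearchFilterType):
+//
+//	search := SearchQueryType(`(type:"httptrap")`)
+//	filter := map[string][]string{"f_notes": {"dev"}}
+//	bundles, err := client.SearchCheckBundles(&search, &filter)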
+func (a *API) SearchCheckBundles(searchCriteria *SearchQueryType, filterCriteria *map[string][]string) (*[]CheckBundle, error) {
+
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchCheckBundles()
+	}
+
+	reqURL := url.URL{
+		Path:     config.CheckBundlePrefix,
+		RawQuery: q.Encode(),
+	}
+
+	resp, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var results []CheckBundle
+	if err := json.Unmarshal(resp, &results); err != nil {
+		return nil, err
+	}
+
+	return &results, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
new file mode 100644
index 000000000..817c7b891
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CheckBundleMetrics API support - Fetch, Create*, Update, and Delete**
+// See: https://login.circonus.com/resources/api/calls/check_bundle_metrics
+// * : create metrics by adding to array with a status of 'active'
+// ** : delete (disable collection of) metrics by changing status from 'active' to 'available'
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// CheckBundleMetrics defines metrics for a specific check bundle. See https://login.circonus.com/resources/api/calls/check_bundle_metrics for more information.
+type CheckBundleMetrics struct {
+	CID     string              `json:"_cid,omitempty"` // string
+	Metrics []CheckBundleMetric `json:"metrics"`        // See check_bundle.go for CheckBundleMetric definition
+}
+
+// FetchCheckBundleMetrics retrieves metrics for the check bundle with passed cid.
+func (a *API) FetchCheckBundleMetrics(cid CIDType) (*CheckBundleMetrics, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid check bundle metrics CID [none]")
+	}
+
+	metricsCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
+	}
+
+	result, err := a.Get(metricsCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch check bundle metrics, received JSON: %s", string(result))
+	}
+
+	metrics := &CheckBundleMetrics{}
+	if err := json.Unmarshal(result, metrics); err != nil {
+		return nil, err
+	}
+
+	return metrics, nil
+}
+
+// UpdateCheckBundleMetrics updates passed metrics.
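+//
+// Illustrative sketch of activating the first metric of a bundle (the
+// CID is hypothetical):
+//
+//	cid := "/check_bundle_metrics/1234"
+//	metrics, err := client.FetchCheckBundleMetrics(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	metrics.Metrics[0].Status = "active"
+//	metrics, err = client.UpdateCheckBundleMetrics(metrics)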
+func (a *API) UpdateCheckBundleMetrics(cfg *CheckBundleMetrics) (*CheckBundleMetrics, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle metrics config [nil]") + } + + metricsCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update check bundle metrics, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(metricsCID, jsonCfg) + if err != nil { + return nil, err + } + + metrics := &CheckBundleMetrics{} + if err := json.Unmarshal(result, metrics); err != nil { + return nil, err + } + + return metrics, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go deleted file mode 100644 index 559df7ac0..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package api - -import ( - "encoding/json" - "fmt" -) - -// CheckBundleConfig configuration specific to check type -type CheckBundleConfig struct { - AsyncMetrics bool `json:"async_metrics"` - Secret string `json:"secret"` - SubmissionURL string `json:"submission_url"` -} - -// CheckBundleMetric individual metric configuration -type CheckBundleMetric struct { - Name string `json:"name"` - Type string `json:"type"` - Units string `json:"units"` - Status string `json:"status"` -} - -// CheckBundle definition -type CheckBundle struct { - CheckUUIDs []string `json:"_check_uuids,omitempty"` - Checks []string `json:"_checks,omitempty"` - Cid string `json:"_cid,omitempty"` - Created int `json:"_created,omitempty"` - LastModified int `json:"_last_modified,omitempty"` - LastModifedBy string `json:"_last_modifed_by,omitempty"` - ReverseConnectUrls []string `json:"_reverse_connection_urls,omitempty"` - Brokers []string `json:"brokers"` - Config CheckBundleConfig `json:"config"` - DisplayName string `json:"display_name"` - Metrics []CheckBundleMetric `json:"metrics"` - MetricLimit int `json:"metric_limit"` - Notes string `json:"notes"` - Period int `json:"period"` - Status string `json:"status"` - Tags []string `json:"tags"` - Target string `json:"target"` - Timeout int `json:"timeout"` - Type string `json:"type"` -} - -// FetchCheckBundleByID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) { - cid := CIDType(fmt.Sprintf("/check_bundle/%d", id)) - return a.FetchCheckBundleByCID(cid) -} - -// FetchCheckBundleByCID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) { - result, err := a.Get(string(cid)) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - json.Unmarshal(result, checkBundle) - - return checkBundle, nil -} - -// CheckBundleSearch returns list of check bundles matching a search query -// - a search query not a filter (see: https://login.circonus.com/resources/api#searching) -func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) { - apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria) - 
- response, err := a.Get(apiPath) - if err != nil { - return nil, fmt.Errorf("[ERROR] API call error %+v", err) - } - - var results []CheckBundle - err = json.Unmarshal(response, &results) - if err != nil { - return nil, fmt.Errorf("[ERROR] Parsing JSON response %+v", err) - } - - return results, nil -} - -// CreateCheckBundle create a new check bundle (check) -func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) { - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Post("/check_bundle", cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - err = json.Unmarshal(response, checkBundle) - if err != nil { - return nil, err - } - - return checkBundle, nil -} - -// UpdateCheckBundle updates a check bundle configuration -func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) { - if a.Debug { - a.Log.Printf("[DEBUG] Updating check bundle with new metrics.") - } - - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Put(config.Cid, cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - err = json.Unmarshal(response, checkBundle) - if err != nil { - return nil, err - } - - return checkBundle, nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go new file mode 100644 index 000000000..bbca43d03 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go @@ -0,0 +1,538 @@ +package config + +// Key for CheckBundleConfig options and CheckDetails info +type Key string + +// Constants per type as defined in +// https://login.circonus.com/resources/api/calls/check_bundle +const ( + // + // default settings for api.NewCheckBundle() + // + DefaultCheckBundleMetricLimit = -1 // unlimited + DefaultCheckBundleStatus = "active" + DefaultCheckBundlePeriod = 60 + DefaultCheckBundleTimeout = 10 + DefaultConfigOptionsSize = 20 + + // + // common (apply to more than one check type) + // + AsyncMetrics = Key("asynch_metrics") + AuthMethod = Key("auth_method") + AuthPassword = Key("auth_password") + AuthUser = Key("auth_user") + BaseURL = Key("base_url") + CAChain = Key("ca_chain") + CertFile = Key("certificate_file") + Ciphers = Key("ciphers") + Command = Key("command") + DSN = Key("dsn") + HeaderPrefix = Key("header_") + HTTPVersion = Key("http_version") + KeyFile = Key("key_file") + Method = Key("method") + Password = Key("password") + Payload = Key("payload") + Port = Key("port") + Query = Key("query") + ReadLimit = Key("read_limit") + Secret = Key("secret") + SQL = Key("sql") + URI = Key("uri") + URL = Key("url") + Username = Key("username") + UseSSL = Key("use_ssl") + User = Key("user") + SASLAuthentication = Key("sasl_authentication") + SASLUser = Key("sasl_user") + SecurityLevel = Key("security_level") + Version = Key("version") + AppendColumnName = Key("append_column_name") + Database = Key("database") + JDBCPrefix = Key("jdbc_") + + // + // CAQL check + // + // Common items: + // Query + + // + // Circonus Windows Agent + // + // Common items: + // AuthPassword + // AuthUser + // Port + // URL + Calculated = Key("calculated") + Category = Key("category") + + // + // Cloudwatch + // + // Notes: + // DimPrefix is special because the actual key is dynamic and matches: `dim_(.+)` + // Common items: + // URL + // Version + APIKey = Key("api_key") + APISecret = 
Key("api_secret") + CloudwatchMetrics = Key("cloudwatch_metrics") + DimPrefix = Key("dim_") + Granularity = Key("granularity") + Namespace = Key("namespace") + Statistics = Key("statistics") + + // + // Collectd + // + // Common items: + // AsyncMetrics + // Username + // Secret + // SecurityLevel + + // + // Composite + // + CompositeMetricName = Key("composite_metric_name") + Formula = Key("formula") + + // + // DHCP + // + HardwareAddress = Key("hardware_addr") + HostIP = Key("host_ip") + RequestType = Key("request_type") + SendPort = Key("send_port") + + // + // DNS + // + // Common items: + // Query + CType = Key("ctype") + Nameserver = Key("nameserver") + RType = Key("rtype") + + // + // EC Console + // + // Common items: + // Command + // Port + // SASLAuthentication + // SASLUser + Objects = Key("objects") + XPath = Key("xpath") + + // + // Elastic Search + // + // Common items: + // Port + // URL + + // + // Ganglia + // + // Common items: + // AsyncMetrics + + // + // Google Analytics + // + // Common items: + // Password + // Username + OAuthToken = Key("oauth_token") + OAuthTokenSecret = Key("oauth_token_secret") + OAuthVersion = Key("oauth_version") + TableID = Key("table_id") + UseOAuth = Key("use_oauth") + + // + // HA Proxy + // + // Common items: + // AuthPassword + // AuthUser + // Port + // UseSSL + Host = Key("host") + Select = Key("select") + + // + // HTTP + // + // Notes: + // HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)` + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // KeyFile + // URL + // HeaderPrefix + // HTTPVersion + // Method + // Payload + // ReadLimit + Body = Key("body") + Code = Key("code") + Extract = Key("extract") + Redirects = Key("redirects") + + // + // HTTPTRAP + // + // Common items: + // AsyncMetrics + // Secret + + // + // IMAP + // + // Common items: + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // KeyFile + // Port + // UseSSL + Fetch = Key("fetch") + Folder = Key("folder") + HeaderHost = Key("header_Host") + Search = Key("search") + + // + // JMX + // + // Common items: + // Password + // Port + // URI + // Username + MbeanDomains = Key("mbean_domains") + + // + // JSON + // + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // HeaderPrefix + // HTTPVersion + // KeyFile + // Method + // Payload + // Port + // ReadLimit + // URL + + // + // Keynote + // + // Notes: + // SlotAliasPrefix is special because the actual key is dynamic and matches: `slot_alias_(\d+)` + // Common items: + // APIKey + // BaseURL + PageComponent = Key("pagecomponent") + SlotAliasPrefix = Key("slot_alias_") + SlotIDList = Key("slot_id_list") + TransPageList = Key("transpagelist") + + // + // Keynote Pulse + // + // Common items: + // BaseURL + // Password + // User + AgreementID = Key("agreement_id") + + // + // LDAP + // + // Common items: + // Password + // Port + AuthType = Key("authtype") + DN = Key("dn") + SecurityPrincipal = Key("security_principal") + + // + // Memcached + // + // Common items: + // Port + + // + // MongoDB + // + // Common items: + // Command + // Password + // Port + // Username + DBName = Key("dbname") + + // + // Munin + // + // Note: no configuration options + + // + // MySQL + // + // Common items: + // DSN + // SQL + + // + // Newrelic rpm + // + // Common items: + // APIKey + AccountID = Key("acct_id") + ApplicationID = Key("application_id") + LicenseKey 
= Key("license_key") + + // + // Nginx + // + // Common items: + // CAChain + // CertFile + // Ciphers + // KeyFile + // URL + + // + // NRPE + // + // Common items: + // Command + // Port + // UseSSL + AppendUnits = Key("append_uom") + + // + // NTP + // + // Common items: + // Port + Control = Key("control") + + // + // Oracle + // + // Notes: + // JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)` + // Common items: + // AppendColumnName + // Database + // JDBCPrefix + // Password + // Port + // SQL + // User + + // + // Ping ICMP + // + AvailNeeded = Key("avail_needed") + Count = Key("count") + Interval = Key("interval") + + // + // PostgreSQL + // + // Common items: + // DSN + // SQL + + // + // Redis + // + // Common items: + // Command + // Password + // Port + DBIndex = Key("dbindex") + + // + // Resmon + // + // Notes: + // HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)` + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // HeaderPrefix + // HTTPVersion + // KeyFile + // Method + // Payload + // Port + // ReadLimit + // URL + + // + // SMTP + // + // Common items: + // Payload + // Port + // SASLAuthentication + // SASLUser + EHLO = Key("ehlo") + From = Key("from") + SASLAuthID = Key("sasl_auth_id") + SASLPassword = Key("sasl_password") + StartTLS = Key("starttls") + To = Key("to") + + // + // SNMP + // + // Notes: + // OIDPrefix is special because the actual key is dynamic and matches: `oid_(.+)` + // TypePrefix is special because the actual key is dynamic and matches: `type_(.+)` + // Common items: + // Port + // SecurityLevel + // Version + AuthPassphrase = Key("auth_passphrase") + AuthProtocol = Key("auth_protocol") + Community = Key("community") + ContextEngine = Key("context_engine") + ContextName = Key("context_name") + OIDPrefix = Key("oid_") + PrivacyPassphrase = Key("privacy_passphrase") + PrivacyProtocol = Key("privacy_protocol") + SecurityEngine = Key("security_engine") + SecurityName = Key("security_name") + SeparateQueries = Key("separate_queries") + TypePrefix = Key("type_") + + // + // SQLServer + // + // Notes: + // JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)` + // Common items: + // AppendColumnName + // Database + // JDBCPrefix + // Password + // Port + // SQL + // User + + // + // SSH v2 + // + // Common items: + // Port + MethodCompCS = Key("method_comp_cs") + MethodCompSC = Key("method_comp_sc") + MethodCryptCS = Key("method_crypt_cs") + MethodCryptSC = Key("method_crypt_sc") + MethodHostKey = Key("method_hostkey") + MethodKeyExchange = Key("method_kex") + MethodMacCS = Key("method_mac_cs") + MethodMacSC = Key("method_mac_sc") + + // + // StatsD + // + // Note: no configuration options + + // + // TCP + // + // Common items: + // CAChain + // CertFile + // Ciphers + // KeyFile + // Port + // UseSSL + BannerMatch = Key("banner_match") + + // + // Varnish + // + // Note: no configuration options + + // + // reserved - config option(s) can't actually be set - here for r/o access + // + ReverseSecretKey = Key("reverse:secret_key") + SubmissionURL = Key("submission_url") + + // + // Endpoint prefix & cid regex + // + DefaultCIDRegex = "[0-9]+" + DefaultUUIDRegex = "[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}" + AccountPrefix = "/account" + AccountCIDRegex = "^(" + AccountPrefix + "/(" + DefaultCIDRegex + "|current))$" + AcknowledgementPrefix = "/acknowledgement" + 
AcknowledgementCIDRegex = "^(" + AcknowledgementPrefix + "/(" + DefaultCIDRegex + "))$"
+	AlertPrefix = "/alert"
+	AlertCIDRegex = "^(" + AlertPrefix + "/(" + DefaultCIDRegex + "))$"
+	AnnotationPrefix = "/annotation"
+	AnnotationCIDRegex = "^(" + AnnotationPrefix + "/(" + DefaultCIDRegex + "))$"
+	BrokerPrefix = "/broker"
+	BrokerCIDRegex = "^(" + BrokerPrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckBundleMetricsPrefix = "/check_bundle_metrics"
+	CheckBundleMetricsCIDRegex = "^(" + CheckBundleMetricsPrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckBundlePrefix = "/check_bundle"
+	CheckBundleCIDRegex = "^(" + CheckBundlePrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckPrefix = "/check"
+	CheckCIDRegex = "^(" + CheckPrefix + "/(" + DefaultCIDRegex + "))$"
+	ContactGroupPrefix = "/contact_group"
+	ContactGroupCIDRegex = "^(" + ContactGroupPrefix + "/(" + DefaultCIDRegex + "))$"
+	DashboardPrefix = "/dashboard"
+	DashboardCIDRegex = "^(" + DashboardPrefix + "/(" + DefaultCIDRegex + "))$"
+	GraphPrefix = "/graph"
+	GraphCIDRegex = "^(" + GraphPrefix + "/(" + DefaultUUIDRegex + "))$"
+	MaintenancePrefix = "/maintenance"
+	MaintenanceCIDRegex = "^(" + MaintenancePrefix + "/(" + DefaultCIDRegex + "))$"
+	MetricClusterPrefix = "/metric_cluster"
+	MetricClusterCIDRegex = "^(" + MetricClusterPrefix + "/(" + DefaultCIDRegex + "))$"
+	MetricPrefix = "/metric"
+	MetricCIDRegex = "^(" + MetricPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
+	OutlierReportPrefix = "/outlier_report"
+	OutlierReportCIDRegex = "^(" + OutlierReportPrefix + "/(" + DefaultCIDRegex + "))$"
+	ProvisionBrokerPrefix = "/provision_broker"
+	ProvisionBrokerCIDRegex = "^(" + ProvisionBrokerPrefix + "/([a-z0-9]+-[a-z0-9]+))$"
+	RuleSetGroupPrefix = "/rule_set_group"
+	RuleSetGroupCIDRegex = "^(" + RuleSetGroupPrefix + "/(" + DefaultCIDRegex + "))$"
+	RuleSetPrefix = "/rule_set"
+	RuleSetCIDRegex = "^(" + RuleSetPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
+	UserPrefix = "/user"
+	UserCIDRegex = "^(" + UserPrefix + "/(" + DefaultCIDRegex + "|current))$"
+	WorksheetPrefix = "/worksheet"
+	WorksheetCIDRegex = "^(" + WorksheetPrefix + "/(" + DefaultUUIDRegex + "))$"
+	// contact group severity levels
+	NumSeverityLevels = 5
+)
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
new file mode 100644
index 000000000..578a2e898
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
@@ -0,0 +1,263 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Contact Group API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/contact_group + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// ContactGroupAlertFormats define alert formats +type ContactGroupAlertFormats struct { + LongMessage *string `json:"long_message"` // string or null + LongSubject *string `json:"long_subject"` // string or null + LongSummary *string `json:"long_summary"` // string or null + ShortMessage *string `json:"short_message"` // string or null + ShortSummary *string `json:"short_summary"` // string or null +} + +// ContactGroupContactsExternal external contacts +type ContactGroupContactsExternal struct { + Info string `json:"contact_info"` // string + Method string `json:"method"` // string +} + +// ContactGroupContactsUser user contacts +type ContactGroupContactsUser struct { + Info string `json:"_contact_info,omitempty"` // string + Method string `json:"method"` // string + UserCID string `json:"user"` // string +} + +// ContactGroupContacts list of contacts +type ContactGroupContacts struct { + External []ContactGroupContactsExternal `json:"external"` // [] len >= 0 + Users []ContactGroupContactsUser `json:"users"` // [] len >= 0 +} + +// ContactGroupEscalation defines escalations for severity levels +type ContactGroupEscalation struct { + After uint `json:"after"` // uint + ContactGroupCID string `json:"contact_group"` // string +} + +// ContactGroup defines a contact group. See https://login.circonus.com/resources/api/calls/contact_group for more information. +type ContactGroup struct { + AggregationWindow uint `json:"aggregation_window,omitempty"` // uint + AlertFormats ContactGroupAlertFormats `json:"alert_formats,omitempty"` // ContactGroupAlertFormats + CID string `json:"_cid,omitempty"` // string + Contacts ContactGroupContacts `json:"contacts,omitempty"` // ContactGroupContacts + Escalations []*ContactGroupEscalation `json:"escalations,omitempty"` // [] len == 5, elements: ContactGroupEscalation or null + LastModified uint `json:"_last_modified,omitempty"` // uint + LastModifiedBy string `json:"_last_modified_by,omitempty"` // string + Name string `json:"name,omitempty"` // string + Reminders []uint `json:"reminders,omitempty"` // [] len == 5 + Tags []string `json:"tags,omitempty"` // [] len >= 0 +} + +// NewContactGroup returns a ContactGroup (with defaults, if applicable) +func NewContactGroup() *ContactGroup { + return &ContactGroup{ + Escalations: make([]*ContactGroupEscalation, config.NumSeverityLevels), + Reminders: make([]uint, config.NumSeverityLevels), + Contacts: ContactGroupContacts{ + External: []ContactGroupContactsExternal{}, + Users: []ContactGroupContactsUser{}, + }, + } +} + +// FetchContactGroup retrieves contact group with passed cid. 
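+//
+// Illustrative sketch (the CID is hypothetical):
+//
+//	cid := "/contact_group/1234"
+//	group, err := client.FetchContactGroup(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(group.Name)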
+func (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid contact group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + result, err := a.Get(groupCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch contact group, received JSON: %s", string(result)) + } + + group := new(ContactGroup) + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// FetchContactGroups retrieves all contact groups available to the API Token. +func (a *API) FetchContactGroups() (*[]ContactGroup, error) { + result, err := a.Get(config.ContactGroupPrefix) + if err != nil { + return nil, err + } + + var groups []ContactGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} + +// UpdateContactGroup updates passed contact group. +func (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid contact group config [nil]") + } + + groupCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update contact group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(groupCID, jsonCfg) + if err != nil { + return nil, err + } + + group := &ContactGroup{} + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// CreateContactGroup creates a new contact group. +func (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid contact group config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create contact group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.ContactGroupPrefix, jsonCfg) + if err != nil { + return nil, err + } + + group := &ContactGroup{} + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// DeleteContactGroup deletes passed contact group. +func (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid contact group config [nil]") + } + return a.DeleteContactGroupByCID(CIDType(&cfg.CID)) +} + +// DeleteContactGroupByCID deletes contact group with passed cid. +func (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid contact group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + _, err = a.Delete(groupCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchContactGroups returns contact groups matching the specified +// search query and/or filter. 
If nil is passed for both parameters +// all contact groups will be returned. +func (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchContactGroups() + } + + reqURL := url.URL{ + Path: config.ContactGroupPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var groups []ContactGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go new file mode 100644 index 000000000..b21987387 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go @@ -0,0 +1,399 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Dashboard API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/dashboard + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// DashboardGridLayout defines layout +type DashboardGridLayout struct { + Height uint `json:"height"` + Width uint `json:"width"` +} + +// DashboardAccessConfig defines access config +type DashboardAccessConfig struct { + BlackDash bool `json:"black_dash,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Fullscreen bool `json:"fullscreen,omitempty"` + FullscreenHideTitle bool `json:"fullscreen_hide_title,omitempty"` + Nickname string `json:"nickname,omitempty"` + ScaleText bool `json:"scale_text,omitempty"` + SharedID string `json:"shared_id,omitempty"` + TextSize uint `json:"text_size,omitempty"` +} + +// DashboardOptions defines options +type DashboardOptions struct { + AccessConfigs []DashboardAccessConfig `json:"access_configs,omitempty"` + FullscreenHideTitle bool `json:"fullscreen_hide_title,omitempty"` + HideGrid bool `json:"hide_grid,omitempty"` + Linkages [][]string `json:"linkages,omitempty"` + ScaleText bool `json:"scale_text,omitempty"` + TextSize uint `json:"text_size,omitempty"` +} + +// ChartTextWidgetDatapoint defines datapoints for charts +type ChartTextWidgetDatapoint struct { + AccountID string `json:"account_id,omitempty"` // metric cluster, metric + CheckID uint `json:"_check_id,omitempty"` // metric + ClusterID uint `json:"cluster_id,omitempty"` // metric cluster + ClusterTitle string `json:"_cluster_title,omitempty"` // metric cluster + Label string `json:"label,omitempty"` // metric + Label2 string `json:"_label,omitempty"` // metric cluster + Metric string `json:"metric,omitempty"` // metric + MetricType string `json:"_metric_type,omitempty"` // metric + NumericOnly bool `json:"numeric_only,omitempty"` // metric cluster +} + +// ChartWidgetDefinitionLegend defines chart widget definition legend +type ChartWidgetDefinitionLegend struct { + Show bool `json:"show,omitempty"` + Type string `json:"type,omitempty"` +} + +// 
ChartWidgetWedgeLabels defines chart widget wedge labels +type ChartWidgetWedgeLabels struct { + OnChart bool `json:"on_chart,omitempty"` + ToolTips bool `json:"tooltips,omitempty"` +} + +// ChartWidgetWedgeValues defines chart widget wedge values +type ChartWidgetWedgeValues struct { + Angle string `json:"angle,omitempty"` + Color string `json:"color,omitempty"` + Show bool `json:"show,omitempty"` +} + +// ChartWidgtDefinition defines chart widget definition +type ChartWidgtDefinition struct { + Datasource string `json:"datasource,omitempty"` + Derive string `json:"derive,omitempty"` + DisableAutoformat bool `json:"disable_autoformat,omitempty"` + Formula string `json:"formula,omitempty"` + Legend ChartWidgetDefinitionLegend `json:"legend,omitempty"` + Period uint `json:"period,omitempty"` + PopOnHover bool `json:"pop_onhover,omitempty"` + WedgeLabels ChartWidgetWedgeLabels `json:"wedge_labels,omitempty"` + WedgeValues ChartWidgetWedgeValues `json:"wedge_values,omitempty"` +} + +// ForecastGaugeWidgetThresholds defines forecast widget thresholds +type ForecastGaugeWidgetThresholds struct { + Colors []string `json:"colors,omitempty"` // forecasts, gauges + Flip bool `json:"flip,omitempty"` // gauges + Values []string `json:"values,omitempty"` // forecasts, gauges +} + +// StatusWidgetAgentStatusSettings defines agent status settings +type StatusWidgetAgentStatusSettings struct { + Search string `json:"search,omitempty"` + ShowAgentTypes string `json:"show_agent_types,omitempty"` + ShowContact bool `json:"show_contact,omitempty"` + ShowFeeds bool `json:"show_feeds,omitempty"` + ShowSetup bool `json:"show_setup,omitempty"` + ShowSkew bool `json:"show_skew,omitempty"` + ShowUpdates bool `json:"show_updates,omitempty"` +} + +// StatusWidgetHostStatusSettings defines host status settings +type StatusWidgetHostStatusSettings struct { + LayoutStyle string `json:"layout_style,omitempty"` + Search string `json:"search,omitempty"` + SortBy string `json:"sort_by,omitempty"` + TagFilterSet []string `json:"tag_filter_set,omitempty"` +} + +// DashboardWidgetSettings defines settings specific to widget +type DashboardWidgetSettings struct { + AccountID string `json:"account_id,omitempty"` // alerts, clusters, gauges, graphs, lists, status + Acknowledged string `json:"acknowledged,omitempty"` // alerts + AgentStatusSettings StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status + Algorithm string `json:"algorithm,omitempty"` // clusters + Autoformat bool `json:"autoformat,omitempty"` // text + BodyFormat string `json:"body_format,omitempty"` // text + ChartType string `json:"chart_type,omitempty"` // charts + CheckUUID string `json:"check_uuid,omitempty"` // gauges + Cleared string `json:"cleared,omitempty"` // alerts + ClusterID uint `json:"cluster_id,omitempty"` // clusters + ClusterName string `json:"cluster_name,omitempty"` // clusters + ContactGroups []uint `json:"contact_groups,omitempty"` // alerts + ContentType string `json:"content_type,omitempty"` // status + Datapoints []ChartTextWidgetDatapoint `json:"datapoints,omitempty"` // charts, text + DateWindow string `json:"date_window,omitempty"` // graphs + Definition ChartWidgtDefinition `json:"definition,omitempty"` // charts + Dependents string `json:"dependents,omitempty"` // alerts + DisableAutoformat bool `json:"disable_autoformat,omitempty"` // gauges + Display string `json:"display,omitempty"` // alerts + Format string `json:"format,omitempty"` // forecasts + Formula string `json:"formula,omitempty"` // gauges + 
GraphUUID string `json:"graph_id,omitempty"` // graphs + HideXAxis bool `json:"hide_xaxis,omitempty"` // graphs + HideYAxis bool `json:"hide_yaxis,omitempty"` // graphs + HostStatusSettings StatusWidgetHostStatusSettings `json:"host_status_settings,omitempty"` // status + KeyInline bool `json:"key_inline,omitempty"` // graphs + KeyLoc string `json:"key_loc,omitempty"` // graphs + KeySize uint `json:"key_size,omitempty"` // graphs + KeyWrap bool `json:"key_wrap,omitempty"` // graphs + Label string `json:"label,omitempty"` // graphs + Layout string `json:"layout,omitempty"` // clusters + Limit uint `json:"limit,omitempty"` // lists + Maintenance string `json:"maintenance,omitempty"` // alerts + Markup string `json:"markup,omitempty"` // html + MetricDisplayName string `json:"metric_display_name,omitempty"` // gauges + MetricName string `json:"metric_name,omitempty"` // gauges + MinAge string `json:"min_age,omitempty"` // alerts + OffHours []uint `json:"off_hours,omitempty"` // alerts + OverlaySetID string `json:"overlay_set_id,omitempty"` // graphs + Period uint `json:"period,omitempty"` // gauges, text, graphs + RangeHigh int `json:"range_high,omitempty"` // gauges + RangeLow int `json:"range_low,omitempty"` // gauges + Realtime bool `json:"realtime,omitempty"` // graphs + ResourceLimit string `json:"resource_limit,omitempty"` // forecasts + ResourceUsage string `json:"resource_usage,omitempty"` // forecasts + Search string `json:"search,omitempty"` // alerts, lists + Severity string `json:"severity,omitempty"` // alerts + ShowFlags bool `json:"show_flags,omitempty"` // graphs + Size string `json:"size,omitempty"` // clusters + TagFilterSet []string `json:"tag_filter_set,omitempty"` // alerts + Threshold float32 `json:"threshold,omitempty"` // clusters + Thresholds ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"` // forecasts, gauges + TimeWindow string `json:"time_window,omitempty"` // alerts + Title string `json:"title,omitempty"` // alerts, charts, forecasts, gauges, html + TitleFormat string `json:"title_format,omitempty"` // text + Trend string `json:"trend,omitempty"` // forecasts + Type string `json:"type,omitempty"` // gauges, lists + UseDefault bool `json:"use_default,omitempty"` // text + ValueType string `json:"value_type,omitempty"` // gauges, text + WeekDays []string `json:"weekdays,omitempty"` // alerts +} + +// DashboardWidget defines widget +type DashboardWidget struct { + Active bool `json:"active"` + Height uint `json:"height"` + Name string `json:"name"` + Origin string `json:"origin"` + Settings DashboardWidgetSettings `json:"settings"` + Type string `json:"type"` + WidgetID string `json:"widget_id"` + Width uint `json:"width"` +} + +// Dashboard defines a dashboard. See https://login.circonus.com/resources/api/calls/dashboard for more information. 
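+//
+// A hedged construction sketch using NewDashboard below (field values are
+// illustrative only):
+//
+//	dash := NewDashboard()
+//	dash.Title = "example dashboard"
+//	dash.GridLayout = DashboardGridLayout{Height: 2, Width: 2}
+//	dash.Widgets = []DashboardWidget{}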
+type Dashboard struct { + AccountDefault bool `json:"account_default"` + Active bool `json:"_active,omitempty"` + CID string `json:"_cid,omitempty"` + Created uint `json:"_created,omitempty"` + CreatedBy string `json:"_created_by,omitempty"` + GridLayout DashboardGridLayout `json:"grid_layout"` + LastModified uint `json:"_last_modified,omitempty"` + Options DashboardOptions `json:"options"` + Shared bool `json:"shared"` + Title string `json:"title"` + UUID string `json:"_dashboard_uuid,omitempty"` + Widgets []DashboardWidget `json:"widgets"` +} + +// NewDashboard returns a new Dashboard (with defaults, if applicable) +func NewDashboard() *Dashboard { + return &Dashboard{} +} + +// FetchDashboard retrieves dashboard with passed cid. +func (a *API) FetchDashboard(cid CIDType) (*Dashboard, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid dashboard CID [none]") + } + + dashboardCID := string(*cid) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + result, err := a.Get(string(*cid)) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch dashboard, received JSON: %s", string(result)) + } + + dashboard := new(Dashboard) + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// FetchDashboards retrieves all dashboards available to the API Token. +func (a *API) FetchDashboards() (*[]Dashboard, error) { + result, err := a.Get(config.DashboardPrefix) + if err != nil { + return nil, err + } + + var dashboards []Dashboard + if err := json.Unmarshal(result, &dashboards); err != nil { + return nil, err + } + + return &dashboards, nil +} + +// UpdateDashboard updates passed dashboard. +func (a *API) UpdateDashboard(cfg *Dashboard) (*Dashboard, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid dashboard config [nil]") + } + + dashboardCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update dashboard, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(dashboardCID, jsonCfg) + if err != nil { + return nil, err + } + + dashboard := &Dashboard{} + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// CreateDashboard creates a new dashboard. +func (a *API) CreateDashboard(cfg *Dashboard) (*Dashboard, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid dashboard config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create dashboard, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.DashboardPrefix, jsonCfg) + if err != nil { + return nil, err + } + + dashboard := &Dashboard{} + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// DeleteDashboard deletes passed dashboard. 
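+//
+// Illustrative sketch (the CID is hypothetical):
+//
+//	cid := "/dashboard/1234"
+//	ok, err := client.DeleteDashboardByCID(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("deleted:", ok)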
+func (a *API) DeleteDashboard(cfg *Dashboard) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid dashboard config [nil]") + } + return a.DeleteDashboardByCID(CIDType(&cfg.CID)) +} + +// DeleteDashboardByCID deletes dashboard with passed cid. +func (a *API) DeleteDashboardByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid dashboard CID [none]") + } + + dashboardCID := string(*cid) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + _, err = a.Delete(dashboardCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchDashboards returns dashboards matching the specified +// search query and/or filter. If nil is passed for both parameters +// all dashboards will be returned. +func (a *API) SearchDashboards(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Dashboard, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchDashboards() + } + + reqURL := url.URL{ + Path: config.DashboardPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var dashboards []Dashboard + if err := json.Unmarshal(result, &dashboards); err != nil { + return nil, err + } + + return &dashboards, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go new file mode 100644 index 000000000..63904d784 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go @@ -0,0 +1,63 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package api provides methods for interacting with the Circonus API. See the full Circonus API +Documentation at https://login.circonus.com/resources/api for more information. 
+
+Raw REST methods
+
+    Get    - retrieve existing item(s)
+    Put    - update an existing item
+    Post   - create a new item
+    Delete - remove an existing item
+
+Endpoints (supported)
+
+    Account              https://login.circonus.com/resources/api/calls/account
+    Acknowledgement      https://login.circonus.com/resources/api/calls/acknowledgement
+    Alert                https://login.circonus.com/resources/api/calls/alert
+    Annotation           https://login.circonus.com/resources/api/calls/annotation
+    Broker               https://login.circonus.com/resources/api/calls/broker
+    Check                https://login.circonus.com/resources/api/calls/check
+    Check Bundle         https://login.circonus.com/resources/api/calls/check_bundle
+    Check Bundle Metrics https://login.circonus.com/resources/api/calls/check_bundle_metrics
+    Contact Group        https://login.circonus.com/resources/api/calls/contact_group
+    Dashboard            https://login.circonus.com/resources/api/calls/dashboard
+    Graph                https://login.circonus.com/resources/api/calls/graph
+    Maintenance [window] https://login.circonus.com/resources/api/calls/maintenance
+    Metric               https://login.circonus.com/resources/api/calls/metric
+    Metric Cluster       https://login.circonus.com/resources/api/calls/metric_cluster
+    Outlier Report       https://login.circonus.com/resources/api/calls/outlier_report
+    Provision Broker     https://login.circonus.com/resources/api/calls/provision_broker
+    Rule Set             https://login.circonus.com/resources/api/calls/rule_set
+    Rule Set Group       https://login.circonus.com/resources/api/calls/rule_set_group
+    User                 https://login.circonus.com/resources/api/calls/user
+    Worksheet            https://login.circonus.com/resources/api/calls/worksheet
+
+Endpoints (not supported)
+
+    Support may be added for these endpoints in the future. These endpoints may currently be used
+    directly with the Raw REST methods above.
+
+    CAQL       https://login.circonus.com/resources/api/calls/caql
+    Check Move https://login.circonus.com/resources/api/calls/check_move
+    Data       https://login.circonus.com/resources/api/calls/data
+    Snapshot   https://login.circonus.com/resources/api/calls/snapshot
+    Tag        https://login.circonus.com/resources/api/calls/tag
+    Template   https://login.circonus.com/resources/api/calls/template
+
+Verbs
+
+    Fetch  singular/plural item(s) - e.g. FetchAnnotation, FetchAnnotations
+    Create create new item         - e.g. CreateAnnotation
+    Update update an item          - e.g. UpdateAnnotation
+    Delete remove an item          - e.g. DeleteAnnotation, DeleteAnnotationByCID
+    Search search for item(s)      - e.g. SearchAnnotations
+    New    new item config         - e.g. NewAnnotation (returns an empty item,
+                                     any applicable defaults defined)
+
+    Not all endpoints support all verbs.
+*/
+package api
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go
new file mode 100644
index 000000000..2d2865327
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go
@@ -0,0 +1,349 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
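+
+// A general usage sketch for the Fetch/Create/Update/Delete/Search verbs
+// described in doc.go. api.New and api.Config are defined elsewhere in this
+// package; the token values below are placeholders:
+//
+//	client, err := api.New(&api.Config{TokenKey: "...", TokenApp: "fabio"})
+//	if err != nil {
+//		// handle error
+//	}
+//	graphs, err := client.FetchGraphs()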
+ +// Graph API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/graph + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// GraphAccessKey defines an access key for a graph +type GraphAccessKey struct { + Active bool `json:"active,omitempty"` // boolean + Height uint `json:"height,omitempty"` // uint + Key string `json:"key,omitempty"` // string + Legend bool `json:"legend,omitempty"` // boolean + LockDate bool `json:"lock_date,omitempty"` // boolean + LockMode string `json:"lock_mode,omitempty"` // string + LockRangeEnd uint `json:"lock_range_end,omitempty"` // uint + LockRangeStart uint `json:"lock_range_start,omitempty"` // uint + LockShowTimes bool `json:"lock_show_times,omitempty"` // boolean + LockZoom string `json:"lock_zoom,omitempty"` // string + Nickname string `json:"nickname,omitempty"` // string + Title bool `json:"title,omitempty"` // boolean + Width uint `json:"width,omitempty"` // uint + XLabels bool `json:"x_labels,omitempty"` // boolean + YLabels bool `json:"y_labels,omitempty"` // boolean +} + +// GraphComposite defines a composite +type GraphComposite struct { + Axis string `json:"axis,omitempty"` // string + Color string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula,omitempty"` // string or null + Hidden bool `json:"hidden,omitempty"` // boolean + LegendFormula *string `json:"legend_formula,omitempty"` // string or null + Name string `json:"name,omitempty"` // string + Stack *uint `json:"stack,omitempty"` // uint or null +} + +// GraphDatapoint defines a datapoint +type GraphDatapoint struct { + Alpha *float64 `json:"alpha,string,omitempty"` // float64 + Axis string `json:"axis,omitempty"` // string + CAQL *string `json:"caql,omitempty"` // string or null + CheckID uint `json:"check_id,omitempty"` // uint + Color *string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula"` // string or null + Derive interface{} `json:"derive,omitempty"` // BUG doc: string, api: string or boolean(for caql statements) + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + MetricName string `json:"metric_name,omitempty"` // string + MetricType string `json:"metric_type,omitempty"` // string + Name string `json:"name"` // string + Stack *uint `json:"stack"` // uint or null +} + +// GraphGuide defines a guide +type GraphGuide struct { + Color string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula,omitempty"` // string or null + Hidden bool `json:"hidden,omitempty"` // boolean + LegendFormula *string `json:"legend_formula,omitempty"` // string or null + Name string `json:"name,omitempty"` // string +} + +// GraphMetricCluster defines a metric cluster +type GraphMetricCluster struct { + AggregateFunc string `json:"aggregate_function,omitempty"` // string + Axis string `json:"axis,omitempty"` // string + Color *string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + MetricCluster string `json:"metric_cluster,omitempty"` // string + Name string `json:"name,omitempty"` // string + Stack *uint `json:"stack"` // uint or null +} + +// OverlayDataOptions defines overlay options for data. Note, each overlay type requires +// a _subset_ of the options. 
See Graph API documentation (URL above) for details. +type OverlayDataOptions struct { + Alerts *int `json:"alerts,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + ArrayOutput *int `json:"array_output,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + BasePeriod *int `json:"base_period,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Delay *int `json:"delay,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Extension string `json:"extension,omitempty"` // string + GraphTitle string `json:"graph_title,omitempty"` // string + GraphUUID string `json:"graph_id,omitempty"` // string + InPercent *bool `json:"in_percent,string,omitempty"` // boolean encoded as string BUG doc: boolean, api: string + Inverse *int `json:"inverse,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Method string `json:"method,omitempty"` // string + Model string `json:"model,omitempty"` // string + ModelEnd string `json:"model_end,omitempty"` // string + ModelPeriod string `json:"model_period,omitempty"` // string + ModelRelative *int `json:"model_relative,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Out string `json:"out,omitempty"` // string + Prequel string `json:"prequel,omitempty"` // string + Presets string `json:"presets,omitempty"` // string + Quantiles string `json:"quantiles,omitempty"` // string + SeasonLength *int `json:"season_length,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Sensitivity *int `json:"sensitivity,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + SingleValue *int `json:"single_value,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + TargetPeriod string `json:"target_period,omitempty"` // string + TimeOffset string `json:"time_offset,omitempty"` // string + TimeShift *int `json:"time_shift,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Transform string `json:"transform,omitempty"` // string + Version *int `json:"version,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + Window *int `json:"window,string,omitempty"` // int encoded as string BUG doc: numeric, api: string + XShift string `json:"x_shift,omitempty"` // string +} + +// OverlayUISpecs defines UI specs for overlay +type OverlayUISpecs struct { + Decouple bool `json:"decouple,omitempty"` // boolean + ID string `json:"id,omitempty"` // string + Label string `json:"label,omitempty"` // string + Type string `json:"type,omitempty"` // string + Z *int `json:"z,string,omitempty"` // int encoded as string BUG doc: numeric, api: string +} + +// GraphOverlaySet defines overlays for graph +type GraphOverlaySet struct { + DataOpts OverlayDataOptions `json:"data_opts,omitempty"` // OverlayDataOptions + ID string `json:"id,omitempty"` // string + Title string `json:"title,omitempty"` // string + UISpecs OverlayUISpecs `json:"ui_specs,omitempty"` // OverlayUISpecs +} + +// Graph defines a graph. See https://login.circonus.com/resources/api/calls/graph for more information. 
+type Graph struct {
+	AccessKeys     []GraphAccessKey            `json:"access_keys,omitempty"`                // [] len >= 0
+	CID            string                      `json:"_cid,omitempty"`                       // string
+	Composites     []GraphComposite            `json:"composites,omitempty"`                 // [] len >= 0
+	Datapoints     []GraphDatapoint            `json:"datapoints,omitempty"`                 // [] len >= 0
+	Description    string                      `json:"description,omitempty"`                // string
+	Guides         []GraphGuide                `json:"guides,omitempty"`                     // [] len >= 0
+	LineStyle      *string                     `json:"line_style"`                           // string or null
+	LogLeftY       *int                        `json:"logarithmic_left_y,string,omitempty"`  // int encoded as string or null BUG doc: number (not string)
+	LogRightY      *int                        `json:"logarithmic_right_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string)
+	MaxLeftY       *float64                    `json:"max_left_y,string,omitempty"`          // float64 encoded as string or null BUG doc: number (not string)
+	MaxRightY      *float64                    `json:"max_right_y,string,omitempty"`         // float64 encoded as string or null BUG doc: number (not string)
+	MetricClusters []GraphMetricCluster        `json:"metric_clusters,omitempty"`            // [] len >= 0
+	MinLeftY       *float64                    `json:"min_left_y,string,omitempty"`          // float64 encoded as string or null BUG doc: number (not string)
+	MinRightY      *float64                    `json:"min_right_y,string,omitempty"`         // float64 encoded as string or null BUG doc: number (not string)
+	Notes          *string                     `json:"notes,omitempty"`                      // string or null
+	OverlaySets    *map[string]GraphOverlaySet `json:"overlay_sets,omitempty"`               // GroupOverLaySets or null
+	Style          *string                     `json:"style"`                                // string or null
+	Tags           []string                    `json:"tags,omitempty"`                       // [] len >= 0
+	Title          string                      `json:"title,omitempty"`                      // string
+}
+
+// NewGraph returns a Graph (with defaults, if applicable)
+func NewGraph() *Graph {
+	return &Graph{}
+}
+
+// FetchGraph retrieves graph with passed cid.
+func (a *API) FetchGraph(cid CIDType) (*Graph, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid graph CID [none]")
+	}
+
+	graphCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	result, err := a.Get(graphCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch graph, received JSON: %s", string(result))
+	}
+
+	graph := new(Graph)
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// FetchGraphs retrieves all graphs available to the API Token.
+func (a *API) FetchGraphs() (*[]Graph, error) {
+	result, err := a.Get(config.GraphPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var graphs []Graph
+	if err := json.Unmarshal(result, &graphs); err != nil {
+		return nil, err
+	}
+
+	return &graphs, nil
+}
+
+// UpdateGraph updates passed graph.
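+//
+// A typical fetch-modify-update flow (sketch; "client" is assumed to be an
+// initialized *API and the CID a placeholder):
+//
+//	cid := "/graph/01234567-89ab-cdef-0123-456789abcdef"
+//	graph, err := client.FetchGraph(CIDType(&cid))
+//	if err != nil {
+//		// handle error
+//	}
+//	graph.Title = "updated title"
+//	graph, err = client.UpdateGraph(graph)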
+func (a *API) UpdateGraph(cfg *Graph) (*Graph, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid graph config [nil]")
+	}
+
+	graphCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(graphCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	graph := &Graph{}
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// CreateGraph creates a new graph.
+func (a *API) CreateGraph(cfg *Graph) (*Graph, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid graph config [nil]")
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] create graph, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Post(config.GraphPrefix, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	graph := &Graph{}
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// DeleteGraph deletes passed graph.
+func (a *API) DeleteGraph(cfg *Graph) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid graph config [nil]")
+	}
+	return a.DeleteGraphByCID(CIDType(&cfg.CID))
+}
+
+// DeleteGraphByCID deletes graph with passed cid.
+func (a *API) DeleteGraphByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid graph CID [none]")
+	}
+
+	graphCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	_, err = a.Delete(graphCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchGraphs returns graphs matching the specified search query
+// and/or filter. If nil is passed for both parameters all graphs
+// will be returned.
+func (a *API) SearchGraphs(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Graph, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchGraphs()
+	}
+
+	reqURL := url.URL{
+		Path:     config.GraphPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var graphs []Graph
+	if err := json.Unmarshal(result, &graphs); err != nil {
+		return nil, err
+	}
+
+	return &graphs, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
new file mode 100644
index 000000000..0e5e04729
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
@@ -0,0 +1,220 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Maintenance window API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/maintenance + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Maintenance defines a maintenance window. See https://login.circonus.com/resources/api/calls/maintenance for more information. +type Maintenance struct { + CID string `json:"_cid,omitempty"` // string + Item string `json:"item,omitempty"` // string + Notes string `json:"notes,omitempty"` // string + Severities interface{} `json:"severities,omitempty"` // []string NOTE can be set with CSV string or []string + Start uint `json:"start,omitempty"` // uint + Stop uint `json:"stop,omitempty"` // uint + Tags []string `json:"tags,omitempty"` // [] len >= 0 + Type string `json:"type,omitempty"` // string +} + +// NewMaintenanceWindow returns a new Maintenance window (with defaults, if applicable) +func NewMaintenanceWindow() *Maintenance { + return &Maintenance{} +} + +// FetchMaintenanceWindow retrieves maintenance [window] with passed cid. +func (a *API) FetchMaintenanceWindow(cid CIDType) (*Maintenance, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid maintenance window CID [none]") + } + + maintenanceCID := string(*cid) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + result, err := a.Get(maintenanceCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch maintenance window, received JSON: %s", string(result)) + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// FetchMaintenanceWindows retrieves all maintenance [windows] available to API Token. +func (a *API) FetchMaintenanceWindows() (*[]Maintenance, error) { + result, err := a.Get(config.MaintenancePrefix) + if err != nil { + return nil, err + } + + var windows []Maintenance + if err := json.Unmarshal(result, &windows); err != nil { + return nil, err + } + + return &windows, nil +} + +// UpdateMaintenanceWindow updates passed maintenance [window]. +func (a *API) UpdateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid maintenance window config [nil]") + } + + maintenanceCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update maintenance window, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(maintenanceCID, jsonCfg) + if err != nil { + return nil, err + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// CreateMaintenanceWindow creates a new maintenance [window]. 
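+//
+// Severities may be set either as a CSV string or as a []string (see the
+// NOTE on the Maintenance struct above). A sketch, assuming an initialized
+// *API named "client"; the Item and Type values are placeholders:
+//
+//	m := NewMaintenanceWindow()
+//	m.Item = "/check/1234"
+//	m.Type = "check"
+//	m.Severities = "1,2,3" // or: []string{"1", "2", "3"}
+//	m.Start = uint(time.Now().Unix())
+//	m.Stop = m.Start + 3600
+//	window, err := client.CreateMaintenanceWindow(m)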
+func (a *API) CreateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid maintenance window config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create maintenance window, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.MaintenancePrefix, jsonCfg) + if err != nil { + return nil, err + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// DeleteMaintenanceWindow deletes passed maintenance [window]. +func (a *API) DeleteMaintenanceWindow(cfg *Maintenance) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid maintenance window config [nil]") + } + return a.DeleteMaintenanceWindowByCID(CIDType(&cfg.CID)) +} + +// DeleteMaintenanceWindowByCID deletes maintenance [window] with passed cid. +func (a *API) DeleteMaintenanceWindowByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid maintenance window CID [none]") + } + + maintenanceCID := string(*cid) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + _, err = a.Delete(maintenanceCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchMaintenanceWindows returns maintenance [windows] matching +// the specified search query and/or filter. If nil is passed for +// both parameters all maintenance [windows] will be returned. +func (a *API) SearchMaintenanceWindows(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Maintenance, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchMaintenanceWindows() + } + + reqURL := url.URL{ + Path: config.MaintenancePrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var windows []Maintenance + if err := json.Unmarshal(result, &windows); err != nil { + return nil, err + } + + return &windows, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go new file mode 100644 index 000000000..3608b06ff --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go @@ -0,0 +1,162 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Metric API support - Fetch, Create*, Update, Delete*, and Search +// See: https://login.circonus.com/resources/api/calls/metric +// * : create and delete are handled via check_bundle or check_bundle_metrics + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Metric defines a metric. See https://login.circonus.com/resources/api/calls/metric for more information. 
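+//
+// Metrics are created and deleted via check bundles (see the note at the top
+// of this file); a typical interaction is fetch-then-update. Sketch, with an
+// assumed *API named "client" and a placeholder CID:
+//
+//	cid := "/metric/1234_duration"
+//	m, err := client.FetchMetric(CIDType(&cid))
+//	if err != nil {
+//		// handle error
+//	}
+//	m.Tags = append(m.Tags, "service:fabio")
+//	m, err = client.UpdateMetric(m)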
+type Metric struct { + Active bool `json:"_active,omitempty"` // boolean + CheckActive bool `json:"_check_active,omitempty"` // boolean + CheckBundleCID string `json:"_check_bundle,omitempty"` // string + CheckCID string `json:"_check,omitempty"` // string + CheckTags []string `json:"_check_tags,omitempty"` // [] len >= 0 + CheckUUID string `json:"_check_uuid,omitempty"` // string + CID string `json:"_cid,omitempty"` // string + Histogram string `json:"_histogram,omitempty"` // string + Link *string `json:"link,omitempty"` // string or null + MetricName string `json:"_metric_name,omitempty"` // string + MetricType string `json:"_metric_type,omitempty"` // string + Notes *string `json:"notes,omitempty"` // string or null + Tags []string `json:"tags,omitempty"` // [] len >= 0 + Units *string `json:"units,omitempty"` // string or null +} + +// FetchMetric retrieves metric with passed cid. +func (a *API) FetchMetric(cid CIDType) (*Metric, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid metric CID [none]") + } + + metricCID := string(*cid) + + matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID) + } + + result, err := a.Get(metricCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch metric, received JSON: %s", string(result)) + } + + metric := &Metric{} + if err := json.Unmarshal(result, metric); err != nil { + return nil, err + } + + return metric, nil +} + +// FetchMetrics retrieves all metrics available to API Token. +func (a *API) FetchMetrics() (*[]Metric, error) { + result, err := a.Get(config.MetricPrefix) + if err != nil { + return nil, err + } + + var metrics []Metric + if err := json.Unmarshal(result, &metrics); err != nil { + return nil, err + } + + return &metrics, nil +} + +// UpdateMetric updates passed metric. +func (a *API) UpdateMetric(cfg *Metric) (*Metric, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric config [nil]") + } + + metricCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update metric, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(metricCID, jsonCfg) + if err != nil { + return nil, err + } + + metric := &Metric{} + if err := json.Unmarshal(result, metric); err != nil { + return nil, err + } + + return metric, nil +} + +// SearchMetrics returns metrics matching the specified search query +// and/or filter. If nil is passed for both parameters all metrics +// will be returned. 
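+//
+// Example (sketch): SearchQueryType and SearchFilterType are defined
+// elsewhere in this package (a string and a map of filter names to values,
+// respectively); the filter key below is a placeholder:
+//
+//	query := SearchQueryType("duration")
+//	filter := SearchFilterType{"f__active": []string{"true"}}
+//	metrics, err := client.SearchMetrics(&query, &filter)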
+func (a *API) SearchMetrics(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Metric, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchMetrics() + } + + reqURL := url.URL{ + Path: config.MetricPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var metrics []Metric + if err := json.Unmarshal(result, &metrics); err != nil { + return nil, err + } + + return &metrics, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go new file mode 100644 index 000000000..d29c5a674 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go @@ -0,0 +1,261 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Metric Cluster API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/metric_cluster + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// MetricQuery object +type MetricQuery struct { + Query string `json:"query"` + Type string `json:"type"` +} + +// MetricCluster defines a metric cluster. See https://login.circonus.com/resources/api/calls/metric_cluster for more information. +type MetricCluster struct { + CID string `json:"_cid,omitempty"` // string + Description string `json:"description"` // string + MatchingMetrics []string `json:"_matching_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set) + MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set) + Name string `json:"name"` // string + Queries []MetricQuery `json:"queries"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewMetricCluster returns a new MetricCluster (with defaults, if applicable) +func NewMetricCluster() *MetricCluster { + return &MetricCluster{} +} + +// FetchMetricCluster retrieves metric cluster with passed cid. 
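+// The extras argument controls matching-metric expansion: "" returns the
+// cluster alone, "metrics" includes _matching_metrics, and "uuids" includes
+// _matching_uuid_metrics. Sketch, with an assumed *API named "client" and a
+// placeholder CID:
+//
+//	cid := "/metric_cluster/1234"
+//	cluster, err := client.FetchMetricCluster(CIDType(&cid), "metrics")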
+func (a *API) FetchMetricCluster(cid CIDType, extras string) (*MetricCluster, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid metric cluster CID [none]") + } + + clusterCID := string(*cid) + + matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID) + } + + reqURL := url.URL{ + Path: clusterCID, + } + + extra := "" + switch extras { + case "metrics": + extra = "_matching_metrics" + case "uuids": + extra = "_matching_uuid_metrics" + } + + if extra != "" { + q := url.Values{} + q.Set("extra", extra) + reqURL.RawQuery = q.Encode() + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch metric cluster, received JSON: %s", string(result)) + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// FetchMetricClusters retrieves all metric clusters available to API Token. +func (a *API) FetchMetricClusters(extras string) (*[]MetricCluster, error) { + reqURL := url.URL{ + Path: config.MetricClusterPrefix, + } + + extra := "" + switch extras { + case "metrics": + extra = "_matching_metrics" + case "uuids": + extra = "_matching_uuid_metrics" + } + + if extra != "" { + q := url.Values{} + q.Set("extra", extra) + reqURL.RawQuery = q.Encode() + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, err + } + + var clusters []MetricCluster + if err := json.Unmarshal(result, &clusters); err != nil { + return nil, err + } + + return &clusters, nil +} + +// UpdateMetricCluster updates passed metric cluster. +func (a *API) UpdateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric cluster config [nil]") + } + + clusterCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update metric cluster, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(clusterCID, jsonCfg) + if err != nil { + return nil, err + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// CreateMetricCluster creates a new metric cluster. +func (a *API) CreateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric cluster config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create metric cluster, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.MetricClusterPrefix, jsonCfg) + if err != nil { + return nil, err + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// DeleteMetricCluster deletes passed metric cluster. 
+func (a *API) DeleteMetricCluster(cfg *MetricCluster) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid metric cluster config [nil]")
+	}
+	return a.DeleteMetricClusterByCID(CIDType(&cfg.CID))
+}
+
+// DeleteMetricClusterByCID deletes metric cluster with passed cid.
+func (a *API) DeleteMetricClusterByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid metric cluster CID [none]")
+	}
+
+	clusterCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
+	}
+
+	_, err = a.Delete(clusterCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchMetricClusters returns metric clusters matching the specified
+// search query and/or filter. If nil is passed for both parameters
+// all metric clusters will be returned.
+func (a *API) SearchMetricClusters(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]MetricCluster, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchMetricClusters("")
+	}
+
+	reqURL := url.URL{
+		Path:     config.MetricClusterPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var clusters []MetricCluster
+	if err := json.Unmarshal(result, &clusters); err != nil {
+		return nil, err
+	}
+
+	return &clusters, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
new file mode 100644
index 000000000..bc1a4d2b3
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
@@ -0,0 +1,221 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OutlierReport API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/report
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// OutlierReport defines an outlier report. See https://login.circonus.com/resources/api/calls/report for more information.
+type OutlierReport struct {
+	CID              string   `json:"_cid,omitempty"`              // string
+	Config           string   `json:"config,omitempty"`            // string
+	Created          uint     `json:"_created,omitempty"`          // uint
+	CreatedBy        string   `json:"_created_by,omitempty"`       // string
+	LastModified     uint     `json:"_last_modified,omitempty"`    // uint
+	LastModifiedBy   string   `json:"_last_modified_by,omitempty"` // string
+	MetricClusterCID string   `json:"metric_cluster,omitempty"`    // string
+	Tags             []string `json:"tags,omitempty"`              // [] len >= 0
+	Title            string   `json:"title,omitempty"`             // string
+}
+
+// NewOutlierReport returns a new OutlierReport (with defaults, if applicable)
+func NewOutlierReport() *OutlierReport {
+	return &OutlierReport{}
+}
+
+// FetchOutlierReport retrieves outlier report with passed cid.
+func (a *API) FetchOutlierReport(cid CIDType) (*OutlierReport, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid outlier report CID [none]") + } + + reportCID := string(*cid) + + matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID) + } + + result, err := a.Get(reportCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch outlier report, received JSON: %s", string(result)) + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// FetchOutlierReports retrieves all outlier reports available to API Token. +func (a *API) FetchOutlierReports() (*[]OutlierReport, error) { + result, err := a.Get(config.OutlierReportPrefix) + if err != nil { + return nil, err + } + + var reports []OutlierReport + if err := json.Unmarshal(result, &reports); err != nil { + return nil, err + } + + return &reports, nil +} + +// UpdateOutlierReport updates passed outlier report. +func (a *API) UpdateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid outlier report config [nil]") + } + + reportCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update outlier report, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(reportCID, jsonCfg) + if err != nil { + return nil, err + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// CreateOutlierReport creates a new outlier report. +func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid outlier report config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create outlier report, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.OutlierReportPrefix, jsonCfg) + if err != nil { + return nil, err + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// DeleteOutlierReport deletes passed outlier report. +func (a *API) DeleteOutlierReport(cfg *OutlierReport) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid outlier report config [nil]") + } + return a.DeleteOutlierReportByCID(CIDType(&cfg.CID)) +} + +// DeleteOutlierReportByCID deletes outlier report with passed cid. 
+func (a *API) DeleteOutlierReportByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid outlier report CID [none]")
+	}
+
+	reportCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
+	}
+
+	_, err = a.Delete(reportCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchOutlierReports returns outlier reports matching the
+// specified search query and/or filter. If nil is passed for
+// both parameters all outlier reports will be returned.
+func (a *API) SearchOutlierReports(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]OutlierReport, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchOutlierReports()
+	}
+
+	reqURL := url.URL{
+		Path:     config.OutlierReportPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var reports []OutlierReport
+	if err := json.Unmarshal(result, &reports); err != nil {
+		return nil, err
+	}
+
+	return &reports, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go
new file mode 100644
index 000000000..5b432a236
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go
@@ -0,0 +1,151 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ProvisionBroker API support - Fetch, Create, and Update
+// See: https://login.circonus.com/resources/api/calls/provision_broker
+// Note that the provision_broker endpoint does not return the standard cid
+// format of '/object/item' (e.g. /provision_broker/abc-123); it just returns 'item'.
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// BrokerStratcon defines stratcons for broker
+type BrokerStratcon struct {
+	CN   string `json:"cn,omitempty"`   // string
+	Host string `json:"host,omitempty"` // string
+	Port string `json:"port,omitempty"` // string
+}
+
+// ProvisionBroker defines a provision broker [request]. See https://login.circonus.com/resources/api/calls/provision_broker for more details.
+type ProvisionBroker struct {
+	Cert                    string           `json:"_cert,omitempty"`                     // string
+	CID                     string           `json:"_cid,omitempty"`                      // string
+	CSR                     string           `json:"_csr,omitempty"`                      // string
+	ExternalHost            string           `json:"external_host,omitempty"`             // string
+	ExternalPort            string           `json:"external_port,omitempty"`             // string
+	IPAddress               string           `json:"ipaddress,omitempty"`                 // string
+	Latitude                string           `json:"latitude,omitempty"`                  // string
+	Longitude               string           `json:"longitude,omitempty"`                 // string
+	Name                    string           `json:"noit_name,omitempty"`                 // string
+	Port                    string           `json:"port,omitempty"`                      // string
+	PreferReverseConnection bool             `json:"prefer_reverse_connection,omitempty"` // boolean
+	Rebuild                 bool             `json:"rebuild,omitempty"`                   // boolean
+	Stratcons               []BrokerStratcon `json:"_stratcons,omitempty"`                // [] len >= 1
+	Tags                    []string         `json:"tags,omitempty"`                      // [] len >= 0
+}
+
+// NewProvisionBroker returns a new ProvisionBroker (with defaults, if applicable)
+func NewProvisionBroker() *ProvisionBroker {
+	return &ProvisionBroker{}
+}
+
+// FetchProvisionBroker retrieves provision broker [request] with passed cid.
+func (a *API) FetchProvisionBroker(cid CIDType) (*ProvisionBroker, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid provision broker request CID [none]")
+	}
+
+	brokerCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
+	}
+
+	result, err := a.Get(brokerCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch broker provision request, received JSON: %s", string(result))
+	}
+
+	broker := &ProvisionBroker{}
+	if err := json.Unmarshal(result, broker); err != nil {
+		return nil, err
+	}
+
+	return broker, nil
+}
+
+// UpdateProvisionBroker updates a broker definition [request].
+func (a *API) UpdateProvisionBroker(cid CIDType, cfg *ProvisionBroker) (*ProvisionBroker, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid provision broker request config [nil]")
+	}
+
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid provision broker request CID [none]")
+	}
+
+	brokerCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update broker provision request, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(brokerCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	broker := &ProvisionBroker{}
+	if err := json.Unmarshal(result, broker); err != nil {
+		return nil, err
+	}
+
+	return broker, nil
+}
+
+// CreateProvisionBroker creates a new provision broker [request].
+func (a *API) CreateProvisionBroker(cfg *ProvisionBroker) (*ProvisionBroker, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid provision broker request config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create broker provision request, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.ProvisionBrokerPrefix, jsonCfg) + if err != nil { + return nil, err + } + + broker := &ProvisionBroker{} + if err := json.Unmarshal(result, broker); err != nil { + return nil, err + } + + return broker, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go new file mode 100644 index 000000000..3da0907f7 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go @@ -0,0 +1,234 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Rule Set API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/rule_set + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// RuleSetRule defines a ruleset rule +type RuleSetRule struct { + Criteria string `json:"criteria"` // string + Severity uint `json:"severity"` // uint + Value interface{} `json:"value"` // BUG doc: string, api: actual type returned switches based on Criteria + Wait uint `json:"wait"` // uint + WindowingDuration uint `json:"windowing_duration,omitempty"` // uint + WindowingFunction *string `json:"windowing_function,omitempty"` // string or null +} + +// RuleSet defines a ruleset. See https://login.circonus.com/resources/api/calls/rule_set for more information. +type RuleSet struct { + CheckCID string `json:"check"` // string + CID string `json:"_cid,omitempty"` // string + ContactGroups map[uint8][]string `json:"contact_groups"` // [] len 5 + Derive *string `json:"derive,omitempty"` // string or null + Link *string `json:"link"` // string or null + MetricName string `json:"metric_name"` // string + MetricTags []string `json:"metric_tags"` // [] len >= 0 + MetricType string `json:"metric_type"` // string + Notes *string `json:"notes"` // string or null + Parent *string `json:"parent,omitempty"` // string or null + Rules []RuleSetRule `json:"rules"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewRuleSet returns a new RuleSet (with defaults if applicable) +func NewRuleSet() *RuleSet { + return &RuleSet{} +} + +// FetchRuleSet retrieves rule set with passed cid. +func (a *API) FetchRuleSet(cid CIDType) (*RuleSet, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid rule set CID [none]") + } + + rulesetCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + result, err := a.Get(rulesetCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch rule set, received JSON: %s", string(result)) + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(result, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// FetchRuleSets retrieves all rule sets available to API Token. 
+func (a *API) FetchRuleSets() (*[]RuleSet, error) { + result, err := a.Get(config.RuleSetPrefix) + if err != nil { + return nil, err + } + + var rulesets []RuleSet + if err := json.Unmarshal(result, &rulesets); err != nil { + return nil, err + } + + return &rulesets, nil +} + +// UpdateRuleSet updates passed rule set. +func (a *API) UpdateRuleSet(cfg *RuleSet) (*RuleSet, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set config [nil]") + } + + rulesetCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update rule set, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(rulesetCID, jsonCfg) + if err != nil { + return nil, err + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(result, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// CreateRuleSet creates a new rule set. +func (a *API) CreateRuleSet(cfg *RuleSet) (*RuleSet, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create rule set, sending JSON: %s", string(jsonCfg)) + } + + resp, err := a.Post(config.RuleSetPrefix, jsonCfg) + if err != nil { + return nil, err + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(resp, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// DeleteRuleSet deletes passed rule set. +func (a *API) DeleteRuleSet(cfg *RuleSet) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid rule set config [nil]") + } + return a.DeleteRuleSetByCID(CIDType(&cfg.CID)) +} + +// DeleteRuleSetByCID deletes rule set with passed cid. +func (a *API) DeleteRuleSetByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid rule set CID [none]") + } + + rulesetCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + _, err = a.Delete(rulesetCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchRuleSets returns rule sets matching the specified search +// query and/or filter. If nil is passed for both parameters all +// rule sets will be returned. 
+func (a *API) SearchRuleSets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSet, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchRuleSets() + } + + reqURL := url.URL{ + Path: config.RuleSetPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var rulesets []RuleSet + if err := json.Unmarshal(result, &rulesets); err != nil { + return nil, err + } + + return &rulesets, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go new file mode 100644 index 000000000..a15743061 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go @@ -0,0 +1,231 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RuleSetGroup API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/rule_set_group + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// RuleSetGroupFormula defines a formula for raising alerts +type RuleSetGroupFormula struct { + Expression interface{} `json:"expression"` // string or uint BUG doc: string, api: string or numeric + RaiseSeverity uint `json:"raise_severity"` // uint + Wait uint `json:"wait"` // uint +} + +// RuleSetGroupCondition defines conditions for raising alerts +type RuleSetGroupCondition struct { + MatchingSeverities []string `json:"matching_serverities"` // [] len >= 1 + RuleSetCID string `json:"rule_set"` // string +} + +// RuleSetGroup defines a ruleset group. See https://login.circonus.com/resources/api/calls/rule_set_group for more information. +type RuleSetGroup struct { + CID string `json:"_cid,omitempty"` // string + ContactGroups map[uint8][]string `json:"contact_groups"` // [] len == 5 + Formulas []RuleSetGroupFormula `json:"formulas"` // [] len >= 0 + Name string `json:"name"` // string + RuleSetConditions []RuleSetGroupCondition `json:"rule_set_conditions"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewRuleSetGroup returns a new RuleSetGroup (with defaults, if applicable) +func NewRuleSetGroup() *RuleSetGroup { + return &RuleSetGroup{} +} + +// FetchRuleSetGroup retrieves rule set group with passed cid. 
+func (a *API) FetchRuleSetGroup(cid CIDType) (*RuleSetGroup, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid rule set group CID [none]")
+	}
+
+	groupCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
+	}
+
+	result, err := a.Get(groupCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch rule set group, received JSON: %s", string(result))
+	}
+
+	rulesetGroup := &RuleSetGroup{}
+	if err := json.Unmarshal(result, rulesetGroup); err != nil {
+		return nil, err
+	}
+
+	return rulesetGroup, nil
+}
+
+// FetchRuleSetGroups retrieves all rule set groups available to API Token.
+func (a *API) FetchRuleSetGroups() (*[]RuleSetGroup, error) {
+	result, err := a.Get(config.RuleSetGroupPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var rulesetGroups []RuleSetGroup
+	if err := json.Unmarshal(result, &rulesetGroups); err != nil {
+		return nil, err
+	}
+
+	return &rulesetGroups, nil
+}
+
+// UpdateRuleSetGroup updates passed rule set group.
+func (a *API) UpdateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid rule set group config [nil]")
+	}
+
+	groupCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update rule set group, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(groupCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	groups := &RuleSetGroup{}
+	if err := json.Unmarshal(result, groups); err != nil {
+		return nil, err
+	}
+
+	return groups, nil
+}
+
+// CreateRuleSetGroup creates a new rule set group.
+func (a *API) CreateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid rule set group config [nil]")
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] create rule set group, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Post(config.RuleSetGroupPrefix, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	group := &RuleSetGroup{}
+	if err := json.Unmarshal(result, group); err != nil {
+		return nil, err
+	}
+
+	return group, nil
+}
+
+// DeleteRuleSetGroup deletes passed rule set group.
+func (a *API) DeleteRuleSetGroup(cfg *RuleSetGroup) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid rule set group config [nil]")
+	}
+	return a.DeleteRuleSetGroupByCID(CIDType(&cfg.CID))
+}
+
+// DeleteRuleSetGroupByCID deletes rule set group with passed cid.
+func (a *API) DeleteRuleSetGroupByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid rule set group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid rule set group CID [%s]", groupCID) + } + + _, err = a.Delete(groupCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchRuleSetGroups returns rule set groups matching the +// specified search query and/or filter. If nil is passed for +// both parameters all rule set groups will be returned. +func (a *API) SearchRuleSetGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSetGroup, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchRuleSetGroups() + } + + reqURL := url.URL{ + Path: config.RuleSetGroupPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var groups []RuleSetGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go new file mode 100644 index 000000000..7771991d3 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go @@ -0,0 +1,159 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// User API support - Fetch, Update, and Search +// See: https://login.circonus.com/resources/api/calls/user +// Note: Create and Delete are not supported directly via the User API +// endpoint. See the Account endpoint for inviting and removing users +// from specific accounts. + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// UserContactInfo defines known contact details +type UserContactInfo struct { + SMS string `json:"sms,omitempty"` // string + XMPP string `json:"xmpp,omitempty"` // string +} + +// User defines a user. See https://login.circonus.com/resources/api/calls/user for more information. +type User struct { + CID string `json:"_cid,omitempty"` // string + ContactInfo UserContactInfo `json:"contact_info,omitempty"` // UserContactInfo + Email string `json:"email"` // string + Firstname string `json:"firstname"` // string + Lastname string `json:"lastname"` // string +} + +// FetchUser retrieves user with passed cid. Pass nil for '/user/current'. 
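+//
+// For example (illustrative only; assumes an initialized *API named client;
+// the explicit CID is hypothetical):
+//
+//	me, err := client.FetchUser(nil) // nil resolves to /user/current
+//
+//	cid := "/user/1234"
+//	other, err := client.FetchUser(api.CIDType(&cid))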
+func (a *API) FetchUser(cid CIDType) (*User, error) {
+	var userCID string
+
+	if cid == nil || *cid == "" {
+		userCID = config.UserPrefix + "/current"
+	} else {
+		userCID = string(*cid)
+	}
+
+	matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
+	}
+
+	result, err := a.Get(userCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch user, received JSON: %s", string(result))
+	}
+
+	user := new(User)
+	if err := json.Unmarshal(result, user); err != nil {
+		return nil, err
+	}
+
+	return user, nil
+}
+
+// FetchUsers retrieves all users available to API Token.
+func (a *API) FetchUsers() (*[]User, error) {
+	result, err := a.Get(config.UserPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var users []User
+	if err := json.Unmarshal(result, &users); err != nil {
+		return nil, err
+	}
+
+	return &users, nil
+}
+
+// UpdateUser updates passed user.
+func (a *API) UpdateUser(cfg *User) (*User, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid user config [nil]")
+	}
+
+	userCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update user, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(userCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	user := &User{}
+	if err := json.Unmarshal(result, user); err != nil {
+		return nil, err
+	}
+
+	return user, nil
+}
+
+// SearchUsers returns users matching a filter (search queries
+// are not supported by the user endpoint). Pass nil as filter for all
+// users available to the API Token.
+func (a *API) SearchUsers(filterCriteria *SearchFilterType) (*[]User, error) {
+	q := url.Values{}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchUsers()
+	}
+
+	reqURL := url.URL{
+		Path:     config.UserPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var users []User
+	if err := json.Unmarshal(result, &users); err != nil {
+		return nil, err
+	}
+
+	return &users, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
new file mode 100644
index 000000000..0dd5e9373
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
@@ -0,0 +1,232 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Worksheet API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/worksheet
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// WorksheetGraph defines a graph cid to be included in the worksheet
+type WorksheetGraph struct {
+	GraphCID string `json:"graph"` // string
+}
+
+// WorksheetSmartQuery defines a query to include multiple worksheets
+type WorksheetSmartQuery struct {
+	Name  string   `json:"name"`
+	Order []string `json:"order"`
+	Query string   `json:"query"`
+}
+
+// Worksheet defines a worksheet. See https://login.circonus.com/resources/api/calls/worksheet for more information.
+type Worksheet struct {
+	CID          string                `json:"_cid,omitempty"`          // string
+	Description  *string               `json:"description"`             // string or null
+	Favorite     bool                  `json:"favorite"`                // boolean
+	Graphs       []WorksheetGraph      `json:"worksheets,omitempty"`    // [] len >= 0
+	Notes        *string               `json:"notes"`                   // string or null
+	SmartQueries []WorksheetSmartQuery `json:"smart_queries,omitempty"` // [] len >= 0
+	Tags         []string              `json:"tags"`                    // [] len >= 0
+	Title        string                `json:"title"`                   // string
+}
+
+// NewWorksheet returns a new Worksheet (with defaults, if applicable)
+func NewWorksheet() *Worksheet {
+	return &Worksheet{}
+}
+
+// FetchWorksheet retrieves worksheet with passed cid.
+func (a *API) FetchWorksheet(cid CIDType) (*Worksheet, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid worksheet CID [none]")
+	}
+
+	worksheetCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	result, err := a.Get(string(*cid))
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch worksheet, received JSON: %s", string(result))
+	}
+
+	worksheet := new(Worksheet)
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// FetchWorksheets retrieves all worksheets available to API Token.
+func (a *API) FetchWorksheets() (*[]Worksheet, error) {
+	result, err := a.Get(config.WorksheetPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var worksheets []Worksheet
+	if err := json.Unmarshal(result, &worksheets); err != nil {
+		return nil, err
+	}
+
+	return &worksheets, nil
+}
+
+// UpdateWorksheet updates passed worksheet.
+func (a *API) UpdateWorksheet(cfg *Worksheet) (*Worksheet, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+
+	worksheetCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update worksheet, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(worksheetCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	worksheet := &Worksheet{}
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// CreateWorksheet creates a new worksheet.
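+//
+// For example (illustrative only; assumes an initialized *API named client,
+// a hypothetical graph CID, and error handling elided):
+//
+//	ws := api.NewWorksheet()
+//	ws.Title = "service dashboard"
+//	ws.Graphs = []api.WorksheetGraph{{GraphCID: "/graph/..."}}
+//	created, err := client.CreateWorksheet(ws)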
+func (a *API) CreateWorksheet(cfg *Worksheet) (*Worksheet, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] create worksheet, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Post(config.WorksheetPrefix, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	worksheet := &Worksheet{}
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// DeleteWorksheet deletes passed worksheet.
+func (a *API) DeleteWorksheet(cfg *Worksheet) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+	return a.DeleteWorksheetByCID(CIDType(&cfg.CID))
+}
+
+// DeleteWorksheetByCID deletes worksheet with passed cid.
+func (a *API) DeleteWorksheetByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid worksheet CID [none]")
+	}
+
+	worksheetCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	_, err = a.Delete(worksheetCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchWorksheets returns worksheets matching the specified search
+// query and/or filter. If nil is passed for both parameters all
+// worksheets will be returned.
+func (a *API) SearchWorksheets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Worksheet, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchWorksheets()
+	}
+
+	reqURL := url.URL{
+		Path:     config.WorksheetPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var worksheets []Worksheet
+	if err := json.Unmarshal(result, &worksheets); err != nil {
+		return nil, err
+	}
+
+	return &worksheets, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
index b23486ce2..a8413fb53 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
@@ -10,6 +10,7 @@ import (
 	"net"
 	"net/url"
 	"reflect"
+	"strconv"
 	"strings"
 	"time"
 
@@ -23,7 +24,8 @@ func init() {
 // Get Broker to use when creating a check
 func (cm *CheckManager) getBroker() (*api.Broker, error) {
 	if cm.brokerID != 0 {
-		broker, err := cm.apih.FetchBrokerByID(cm.brokerID)
+		cid := fmt.Sprintf("/broker/%d", cm.brokerID)
+		broker, err := cm.apih.FetchBroker(api.CIDType(&cid))
 		if err != nil {
 			return nil, err
 		}
@@ -59,7 +61,7 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
 	cn := ""
 
 	for _, detail := range broker.Details {
-		if detail.IP == host {
+		if *detail.IP == host {
 			cn = detail.CN
 			break
 		}
@@ -76,31 +78,34 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
 // Select a broker for use when creating a check, if a specific broker
// was not specified. func (cm *CheckManager) selectBroker() (*api.Broker, error) { - var brokerList []api.Broker + var brokerList *[]api.Broker var err error - if cm.brokerSelectTag != "" { - brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag) + if len(cm.brokerSelectTag) > 0 { + filter := api.SearchFilterType{ + "f__tags_has": cm.brokerSelectTag, + } + brokerList, err = cm.apih.SearchBrokers(nil, &filter) if err != nil { return nil, err } } else { - brokerList, err = cm.apih.FetchBrokerList() + brokerList, err = cm.apih.FetchBrokers() if err != nil { return nil, err } } - if len(brokerList) == 0 { + if len(*brokerList) == 0 { return nil, fmt.Errorf("zero brokers found") } validBrokers := make(map[string]api.Broker) haveEnterprise := false - for _, broker := range brokerList { + for _, broker := range *brokerList { if cm.isValidBroker(&broker) { - validBrokers[broker.Cid] = broker + validBrokers[broker.CID] = broker if broker.Type == "enterprise" { haveEnterprise = true } @@ -116,7 +121,7 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { } if len(validBrokers) == 0 { - return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList)) + return nil, fmt.Errorf("found %d broker(s), zero are valid", len(*brokerList)) } validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys() @@ -133,8 +138,20 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { // Verify broker supports the check type to be used func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool { + baseType := string(checkType) + + for _, module := range details.Modules { + if module == baseType { + return true + } + } + + if idx := strings.Index(baseType, ":"); idx > 0 { + baseType = baseType[0:idx] + } + for _, module := range details.Modules { - if CheckTypeType(module) == checkType { + if module == baseType { return true } } @@ -145,10 +162,10 @@ func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details // Is the broker valid (active, supports check type, and reachable) func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { - brokerPort := 0 + var brokerHost string + var brokerPort string valid := false for _, detail := range broker.Details { - brokerPort = 43191 // broker must be active if detail.Status != statusActive { @@ -166,34 +183,46 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { continue } - // broker must be reachable and respond within designated time - conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.IP, brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if detail.CN != "trap.noit.circonus.net" { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err) - } - continue // not able to reach the broker (or respone slow enough for it to be considered not usable) - } - // if circonus trap broker, try port 443 - brokerPort = 443 - conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.CN, brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect %v\n", broker.Name, err) - } - continue // not able to reach the broker on 443 either (or respone slow enough for it to be considered not usable) + if detail.ExternalPort != 0 { + brokerPort = strconv.Itoa(int(detail.ExternalPort)) + } else { + if *detail.Port != 0 { + brokerPort = strconv.Itoa(int(*detail.Port)) + } else { + brokerPort = "43191" } } - conn.Close() - if cm.Debug { - 
cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name) + if detail.ExternalHost != nil && *detail.ExternalHost != "" { + brokerHost = *detail.ExternalHost + } else { + brokerHost = *detail.IP + } + + if brokerHost == "trap.noit.circonus.net" && brokerPort != "443" { + brokerPort = "443" } - valid = true - break + retries := 5 + for attempt := 1; attempt <= retries; attempt++ { + // broker must be reachable and respond within designated time + conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", brokerHost, brokerPort), cm.brokerMaxResponseTime) + if err == nil { + conn.Close() + valid = true + break + } + + cm.Log.Printf("[WARN] Broker '%s' unable to connect, %v. Retrying in 2 seconds, attempt %d of %d.", broker.Name, err, attempt, retries) + time.Sleep(2 * time.Second) + } + if valid { + if cm.Debug { + cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name) + } + break + } } return valid } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go index 3f173bf5c..cbe3ba706 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go @@ -7,6 +7,7 @@ package checkmgr import ( "crypto/x509" "encoding/json" + "errors" "fmt" ) @@ -41,17 +42,22 @@ type CACert struct { } // loadCACert loads the CA cert for the broker designated by the submission url -func (cm *CheckManager) loadCACert() { +func (cm *CheckManager) loadCACert() error { if cm.certPool != nil { - return + return nil } cm.certPool = x509.NewCertPool() - cert, err := cm.fetchCert() - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err) + var cert []byte + var err error + + if cm.enabled { + // only attempt to retrieve broker CA cert if + // the check is being managed. + cert, err = cm.fetchCert() + if err != nil { + return err } } @@ -60,12 +66,14 @@ func (cm *CheckManager) loadCACert() { } cm.certPool.AppendCertsFromPEM(cert) + + return nil } // fetchCert fetches CA certificate using Circonus API func (cm *CheckManager) fetchCert() ([]byte, error) { if !cm.enabled { - return circonusCA, nil + return nil, errors.New("check manager is not enabled") } response, err := cm.apih.Get("/pki/ca.crt") @@ -74,8 +82,7 @@ func (cm *CheckManager) fetchCert() ([]byte, error) { } cadata := new(CACert) - err = json.Unmarshal(response, cadata) - if err != nil { + if err := json.Unmarshal(response, cadata); err != nil { return nil, err } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go index 56f6fed28..f9e9987de 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go @@ -10,13 +10,80 @@ import ( "encoding/hex" "errors" "fmt" + "net/url" "strconv" "strings" "time" "github.com/circonus-labs/circonus-gometrics/api" + "github.com/circonus-labs/circonus-gometrics/api/config" ) +// UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.) 
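+//
+// Illustrative call sequence only (assumes an initialized *CheckManager
+// named cm; the metric name and type are hypothetical):
+//
+//	newMetrics := map[string]*api.CheckBundleMetric{
+//		"requests": {Name: "requests", Type: "numeric", Status: "active"},
+//	}
+//	cm.UpdateCheck(newMetrics)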
+func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric) { + // only if check manager is enabled + if !cm.enabled { + return + } + + // only if checkBundle has been populated + if cm.checkBundle == nil { + return + } + + // only if there is *something* to update + if !cm.forceCheckUpdate && len(newMetrics) == 0 && len(cm.metricTags) == 0 { + return + } + + // refresh check bundle (in case there were changes made by other apps or in UI) + cid := cm.checkBundle.CID + checkBundle, err := cm.apih.FetchCheckBundle(api.CIDType(&cid)) + if err != nil { + cm.Log.Printf("[ERROR] unable to fetch up-to-date check bundle %v", err) + return + } + cm.cbmu.Lock() + cm.checkBundle = checkBundle + cm.cbmu.Unlock() + + // check metric_limit and see if it’s 0, if so, don't even bother to try to update the check. + + cm.addNewMetrics(newMetrics) + + if len(cm.metricTags) > 0 { + // note: if a tag has been added (queued) for a metric which never gets sent + // the tags will be discarded. (setting tags does not *create* metrics.) + for metricName, metricTags := range cm.metricTags { + for metricIdx, metric := range cm.checkBundle.Metrics { + if metric.Name == metricName { + cm.checkBundle.Metrics[metricIdx].Tags = metricTags + break + } + } + cm.mtmu.Lock() + delete(cm.metricTags, metricName) + cm.mtmu.Unlock() + } + cm.forceCheckUpdate = true + } + + if cm.forceCheckUpdate { + newCheckBundle, err := cm.apih.UpdateCheckBundle(cm.checkBundle) + if err != nil { + cm.Log.Printf("[ERROR] updating check bundle %v", err) + return + } + + cm.forceCheckUpdate = false + cm.cbmu.Lock() + cm.checkBundle = newCheckBundle + cm.cbmu.Unlock() + cm.inventoryMetrics() + } + +} + // Initialize CirconusMetrics instance. Attempt to find a check otherwise create one. // use cases: // @@ -32,6 +99,8 @@ func (cm *CheckManager) initializeTrapURL() error { cm.trapmu.Lock() defer cm.trapmu.Unlock() + // special case short-circuit: just send to a url, no check management + // up to user to ensure that if url is https that it will work (e.g. 
not self-signed) if cm.checkSubmissionURL != "" { if !cm.enabled { cm.trapURL = cm.checkSubmissionURL @@ -41,7 +110,7 @@ func (cm *CheckManager) initializeTrapURL() error { } if !cm.enabled { - return errors.New("Unable to initialize trap, check manager is disabled.") + return errors.New("unable to initialize trap, check manager is disabled") } var err error @@ -50,10 +119,13 @@ func (cm *CheckManager) initializeTrapURL() error { var broker *api.Broker if cm.checkSubmissionURL != "" { - check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL) + check, err = cm.fetchCheckBySubmissionURL(cm.checkSubmissionURL) if err != nil { return err } + if !check.Active { + return fmt.Errorf("[ERROR] Check ID %v is not active", check.CID) + } // extract check id from check object returned from looking up using submission url // set m.CheckId to the id // set m.SubmissionUrl to "" to prevent trying to search on it going forward @@ -61,27 +133,44 @@ func (cm *CheckManager) initializeTrapURL() error { // unless the new submission url can be fetched with the API (which is no // longer possible using the original submission url) var id int - id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1)) + id, err = strconv.Atoi(strings.Replace(check.CID, "/check/", "", -1)) if err == nil { cm.checkID = api.IDType(id) cm.checkSubmissionURL = "" } else { cm.Log.Printf( "[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n", - check.Cid, err) + check.CID, err) } } else if cm.checkID > 0 { - check, err = cm.apih.FetchCheckByID(cm.checkID) + cid := fmt.Sprintf("/check/%d", cm.checkID) + check, err = cm.apih.FetchCheck(api.CIDType(&cid)) if err != nil { return err } + if !check.Active { + return fmt.Errorf("[ERROR] Check ID %v is not active", check.CID) + } } else { - searchCriteria := fmt.Sprintf( - "(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)", - cm.checkInstanceID, cm.checkType, cm.checkSearchTag) - checkBundle, err = cm.checkBundleSearch(searchCriteria) - if err != nil { - return err + if checkBundle == nil { + // old search (instanceid as check.target) + searchCriteria := fmt.Sprintf( + "(active:1)(type:\"%s\")(host:\"%s\")(tags:%s)", cm.checkType, cm.checkTarget, strings.Join(cm.checkSearchTag, ",")) + checkBundle, err = cm.checkBundleSearch(searchCriteria, map[string][]string{}) + if err != nil { + return err + } + } + + if checkBundle == nil { + // new search (check.target != instanceid, instanceid encoded in notes field) + searchCriteria := fmt.Sprintf( + "(active:1)(type:\"%s\")(tags:%s)", cm.checkType, strings.Join(cm.checkSearchTag, ",")) + filterCriteria := map[string][]string{"f_notes": []string{*cm.getNotes()}} + checkBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria) + if err != nil { + return err + } } if checkBundle == nil { @@ -96,7 +185,8 @@ func (cm *CheckManager) initializeTrapURL() error { if checkBundle == nil { if check != nil { - checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid)) + cid := check.CheckBundleCID + checkBundle, err = cm.apih.FetchCheckBundle(api.CIDType(&cid)) if err != nil { return err } @@ -106,7 +196,8 @@ func (cm *CheckManager) initializeTrapURL() error { } if broker == nil { - broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0])) + cid := checkBundle.Brokers[0] + broker, err = cm.apih.FetchBroker(api.CIDType(&cid)) if err != nil { return err } @@ -116,8 +207,33 @@ func (cm *CheckManager) initializeTrapURL() error { cm.checkBundle = checkBundle 
cm.inventoryMetrics() - // url to which metrics should be PUT - cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL) + // determine the trap url to which metrics should be PUT + if checkBundle.Type == "httptrap" { + if turl, found := checkBundle.Config[config.SubmissionURL]; found { + cm.trapURL = api.URLType(turl) + } else { + if cm.Debug { + cm.Log.Printf("Missing config.%s %+v", config.SubmissionURL, checkBundle) + } + return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.SubmissionURL) + } + } else { + // build a submission_url for non-httptrap checks out of mtev_reverse url + if len(checkBundle.ReverseConnectURLs) == 0 { + return fmt.Errorf("%s is not an HTTPTRAP check and no reverse connection urls found", checkBundle.Checks[0]) + } + mtevURL := checkBundle.ReverseConnectURLs[0] + mtevURL = strings.Replace(mtevURL, "mtev_reverse", "https", 1) + mtevURL = strings.Replace(mtevURL, "check", "module/httptrap", 1) + if rs, found := checkBundle.Config[config.ReverseSecretKey]; found { + cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, rs)) + } else { + if cm.Debug { + cm.Log.Printf("Missing config.%s %+v", config.ReverseSecretKey, checkBundle) + } + return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.ReverseSecretKey) + } + } // used when sending as "ServerName" get around certs not having IP SANS // (cert created with server name as CN but IP used in trap url) @@ -127,26 +243,39 @@ func (cm *CheckManager) initializeTrapURL() error { } cm.trapCN = BrokerCNType(cn) + if cm.enabled { + u, err := url.Parse(string(cm.trapURL)) + if err != nil { + return err + } + if u.Scheme == "https" { + if err := cm.loadCACert(); err != nil { + return err + } + } + } + cm.trapLastUpdate = time.Now() return nil } // Search for a check bundle given a predetermined set of criteria -func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) { - checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria)) +func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string][]string) (*api.CheckBundle, error) { + search := api.SearchQueryType(criteria) + checkBundles, err := cm.apih.SearchCheckBundles(&search, &filter) if err != nil { return nil, err } - if len(checkBundles) == 0 { + if len(*checkBundles) == 0 { return nil, nil // trigger creation of a new check } numActive := 0 checkID := -1 - for idx, check := range checkBundles { + for idx, check := range *checkBundles { if check.Status == statusActive { numActive++ checkID = idx @@ -154,10 +283,12 @@ func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, er } if numActive > 1 { - return nil, fmt.Errorf("[ERROR] Multiple possibilities multiple check bundles match criteria %s\n", criteria) + return nil, fmt.Errorf("[ERROR] multiple check bundles match criteria %s", criteria) } - return &checkBundles[checkID], nil + bundle := (*checkBundles)[checkID] + + return &bundle, nil } // Create a new check to receive metrics @@ -176,22 +307,39 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) return nil, nil, err } - config := api.CheckBundle{ - Brokers: []string{broker.Cid}, - Config: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret}, + chkcfg := &api.CheckBundle{ + Brokers: []string{broker.CID}, + Config: make(map[config.Key]string), DisplayName: string(cm.checkDisplayName), Metrics: []api.CheckBundleMetric{}, - MetricLimit: 0, - Notes: "", + MetricLimit: config.DefaultCheckBundleMetricLimit, + 
Notes:       cm.getNotes(),
 		Period:      60,
 		Status:      statusActive,
-		Tags:        append([]string{string(cm.checkSearchTag)}, cm.checkTags...),
-		Target:      string(cm.checkInstanceID),
+		Tags:        append(cm.checkSearchTag, cm.checkTags...),
+		Target:      string(cm.checkTarget),
 		Timeout:     10,
 		Type:        string(cm.checkType),
 	}
 
-	checkBundle, err := cm.apih.CreateCheckBundle(config)
+	if len(cm.customConfigFields) > 0 {
+		for fld, val := range cm.customConfigFields {
+			chkcfg.Config[config.Key(fld)] = val
+		}
+	}
+
+	//
+	// use the default config settings if these are NOT set by user configuration
+	//
+	if val, ok := chkcfg.Config[config.AsyncMetrics]; !ok || val == "" {
+		chkcfg.Config[config.AsyncMetrics] = "true"
+	}
+
+	if val, ok := chkcfg.Config[config.Secret]; !ok || val == "" {
+		chkcfg.Config[config.Secret] = checkSecret
+	}
+
+	checkBundle, err := cm.apih.CreateCheckBundle(chkcfg)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -209,3 +357,64 @@ func (cm *CheckManager) makeSecret() (string, error) {
 	hash.Write(x)
 	return hex.EncodeToString(hash.Sum(nil))[0:16], nil
 }
+
+func (cm *CheckManager) getNotes() *string {
+	notes := fmt.Sprintf("cgm_instanceid|%s", cm.checkInstanceID)
+	return &notes
+}
+
+// fetchCheckBySubmissionURL fetches a check configuration by submission_url
+func (cm *CheckManager) fetchCheckBySubmissionURL(submissionURL api.URLType) (*api.Check, error) {
+	if string(submissionURL) == "" {
+		return nil, errors.New("[ERROR] Invalid submission URL (blank)")
+	}
+
+	u, err := url.Parse(string(submissionURL))
+	if err != nil {
+		return nil, err
+	}
+
+	// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
+
+	// does it smell like a valid trap url path
+	if !strings.Contains(u.Path, "/module/httptrap/") {
+		return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
+	}
+
+	// extract uuid
+	pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
+	if len(pathParts) != 2 {
+		return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
+	}
+	uuid := pathParts[0]
+
+	filter := api.SearchFilterType{"f__check_uuid": []string{uuid}}
+
+	checks, err := cm.apih.SearchChecks(nil, &filter)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(*checks) == 0 {
+		return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
+	}
+
+	numActive := 0
+	checkID := -1
+
+	for idx, check := range *checks {
+		if check.Active {
+			numActive++
+			checkID = idx
+		}
+	}
+
+	if numActive > 1 {
+		return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
+	}
+
+	check := (*checks)[checkID]
+
+	return &check, nil
+
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
index e23f00a57..f2ef7d6df 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
@@ -16,6 +16,7 @@ import (
 	"os"
 	"path"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -58,17 +59,20 @@ type CheckConfig struct {
 	// used to search for a check to use
 	// used as check.target when creating a check
 	InstanceID string
-	// unique check searching tag
+	// explicitly set check.target (default: instance id)
+	TargetHost string
+	// a custom display name for the check (as viewed in UI Checks)
+	// default: instance id
+	DisplayName string
+	// unique check searching tag (or tags)
 	// used to search for a check to use (combined with instanceid)
 	// 
used as a regular tag when creating a check SearchTag string - // a custom display name for the check (as viewed in UI Checks) - DisplayName string // httptrap check secret (for creating a check) Secret string // additional tags to add to a check (when creating a check) // these tags will not be added to an existing check - Tags []string + Tags string // max amount of time to to hold on to a submission url // when a given submission fails (due to retries) if the // time the url was last updated is > than this, the trap @@ -81,14 +85,18 @@ type CheckConfig struct { // overrides the behavior and will re-activate the metric when it is // encountered. "(true|false)", default "false" ForceMetricActivation string + // Type of check to use (default: httptrap) + Type string + // Custom check config fields (default: none) + CustomConfigFields map[string]string } // BrokerConfig options for broker type BrokerConfig struct { // a specific broker id (numeric portion of cid) ID string - // a tag that can be used to select 1-n brokers from which to select - // when creating a new check (e.g. datacenter:abc) + // one or more tags used to select 1-n brokers from which to select + // when creating a new check (e.g. datacenter:abc or loc:dfw,dc:abc) SelectTag string // for a broker to be considered viable it must respond to a // connection attempt within this amount of time e.g. 200ms, 2s, 1m @@ -114,11 +122,14 @@ type CheckTypeType string // CheckInstanceIDType check instance id type CheckInstanceIDType string +// CheckTargetType check target/host +type CheckTargetType string + // CheckSecretType check secret type CheckSecretType string // CheckTagsType check tags -type CheckTagsType []string +type CheckTagsType string // CheckDisplayNameType check display name type CheckDisplayNameType string @@ -133,31 +144,43 @@ type CheckManager struct { Debug bool apih *api.API + initialized bool + initializedmu sync.RWMutex + // check checkType CheckTypeType checkID api.IDType checkInstanceID CheckInstanceIDType - checkSearchTag api.SearchTagType + checkTarget CheckTargetType + checkSearchTag api.TagType checkSecret CheckSecretType - checkTags CheckTagsType + checkTags api.TagType + customConfigFields map[string]string checkSubmissionURL api.URLType checkDisplayName CheckDisplayNameType forceMetricActivation bool + forceCheckUpdate bool + + // metric tags + metricTags map[string][]string + mtmu sync.Mutex // broker brokerID api.IDType - brokerSelectTag api.SearchTagType + brokerSelectTag api.TagType brokerMaxResponseTime time.Duration // state - checkBundle *api.CheckBundle - availableMetrics map[string]bool - trapURL api.URLType - trapCN BrokerCNType - trapLastUpdate time.Time - trapMaxURLAge time.Duration - trapmu sync.Mutex - certPool *x509.CertPool + checkBundle *api.CheckBundle + cbmu sync.Mutex + availableMetrics map[string]bool + availableMetricsmu sync.Mutex + trapURL api.URLType + trapCN BrokerCNType + trapLastUpdate time.Time + trapMaxURLAge time.Duration + trapmu sync.Mutex + certPool *x509.CertPool } // Trap config @@ -168,58 +191,58 @@ type Trap struct { // NewCheckManager returns a new check manager func NewCheckManager(cfg *Config) (*CheckManager, error) { + return New(cfg) +} + +// New returns a new check manager +func New(cfg *Config) (*CheckManager, error) { if cfg == nil { - return nil, errors.New("Invalid Check Manager configuration (nil).") + return nil, errors.New("invalid Check Manager configuration (nil)") } - cm := &CheckManager{ - enabled: false, - } + cm := &CheckManager{enabled: true, 
initialized: false} + // Setup logging for check manager cm.Debug = cfg.Debug - cm.Log = cfg.Log + if cm.Debug && cm.Log == nil { + cm.Log = log.New(os.Stderr, "", log.LstdFlags) + } if cm.Log == nil { - if cm.Debug { - cm.Log = log.New(os.Stderr, "", log.LstdFlags) - } else { - cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) - } + cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) } if cfg.Check.SubmissionURL != "" { cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) } + // Blank API Token *disables* check management if cfg.API.TokenKey == "" { - if cm.checkSubmissionURL == "" { - return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).") - } - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - return cm, nil + cm.enabled = false } - // enable check manager - - cm.enabled = true - - // initialize api handle - - cfg.API.Debug = cm.Debug - cfg.API.Log = cm.Log + if !cm.enabled && cm.checkSubmissionURL == "" { + return nil, errors.New("invalid check manager configuration (no API token AND no submission url)") + } - apih, err := api.NewAPI(&cfg.API) - if err != nil { - return nil, err + if cm.enabled { + // initialize api handle + cfg.API.Debug = cm.Debug + cfg.API.Log = cm.Log + apih, err := api.New(&cfg.API) + if err != nil { + return nil, err + } + cm.apih = apih } - cm.apih = apih // initialize check related data - - cm.checkType = defaultCheckType + if cfg.Check.Type != "" { + cm.checkType = CheckTypeType(cfg.Check.Type) + } else { + cm.checkType = defaultCheckType + } idSetting := "0" if cfg.Check.ID != "" { @@ -232,10 +255,9 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { cm.checkID = api.IDType(id) cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID) + cm.checkTarget = CheckTargetType(cfg.Check.TargetHost) cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName) - cm.checkSearchTag = api.SearchTagType(cfg.Check.SearchTag) cm.checkSecret = CheckSecretType(cfg.Check.Secret) - cm.checkTags = cfg.Check.Tags fma := defaultForceMetricActivation if cfg.Check.ForceMetricActivation != "" { @@ -255,13 +277,28 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { if cm.checkInstanceID == "" { cm.checkInstanceID = CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an)) } + if cm.checkDisplayName == "" { + cm.checkDisplayName = CheckDisplayNameType(cm.checkInstanceID) + } + if cm.checkTarget == "" { + cm.checkTarget = CheckTargetType(cm.checkInstanceID) + } - if cm.checkSearchTag == "" { - cm.checkSearchTag = api.SearchTagType(fmt.Sprintf("service:%s", an)) + if cfg.Check.SearchTag == "" { + cm.checkSearchTag = []string{fmt.Sprintf("service:%s", an)} + } else { + cm.checkSearchTag = strings.Split(strings.Replace(cfg.Check.SearchTag, " ", "", -1), ",") } - if cm.checkDisplayName == "" { - cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s /cgm", string(cm.checkInstanceID))) + if cfg.Check.Tags != "" { + cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",") + } + + cm.customConfigFields = make(map[string]string) + if len(cfg.Check.CustomConfigFields) > 0 { + for fld, val := range cfg.Check.CustomConfigFields { + cm.customConfigFields[fld] = val + } } dur := cfg.Check.MaxURLAge @@ -275,7 +312,6 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { cm.trapMaxURLAge = maxDur // setup broker - idSetting = "0" if cfg.Broker.ID != "" { idSetting = cfg.Broker.ID @@ -286,7 +322,9 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { } cm.brokerID = 
api.IDType(id)
 
-	cm.brokerSelectTag = api.SearchTagType(cfg.Broker.SelectTag)
+	if cfg.Broker.SelectTag != "" {
+		cm.brokerSelectTag = strings.Split(strings.Replace(cfg.Broker.SelectTag, " ", "", -1), ",")
+	}
 
 	dur = cfg.Broker.MaxResponseTime
 	if dur == "" {
@@ -300,20 +338,56 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
 
 	// metrics
 	cm.availableMetrics = make(map[string]bool)
+	cm.metricTags = make(map[string][]string)
 
-	if err := cm.initializeTrapURL(); err != nil {
-		return nil, err
+	return cm, nil
+}
+
+// Initialize for sending metrics
+func (cm *CheckManager) Initialize() {
+
+	// if not managing the check, quicker initialization
+	if !cm.enabled {
+		err := cm.initializeTrapURL()
+		if err == nil {
+			cm.initializedmu.Lock()
+			cm.initialized = true
+			cm.initializedmu.Unlock()
+		} else {
+			cm.Log.Printf("[WARN] error initializing trap %s", err.Error())
+		}
+		return
 	}
 
-	return cm, nil
+	// background initialization when we have to reach out to the api
+	go func() {
+		cm.apih.EnableExponentialBackoff()
+		err := cm.initializeTrapURL()
+		if err == nil {
+			cm.initializedmu.Lock()
+			cm.initialized = true
+			cm.initializedmu.Unlock()
+		} else {
+			cm.Log.Printf("[WARN] error initializing trap %s", err.Error())
+		}
+		cm.apih.DisableExponentialBackoff()
+	}()
+}
+
+// IsReady reflects if the check has been initialized and metrics can be sent to Circonus
+func (cm *CheckManager) IsReady() bool {
+	cm.initializedmu.RLock()
+	defer cm.initializedmu.RUnlock()
+	return cm.initialized
 }
 
-// GetTrap return the trap url
-func (cm *CheckManager) GetTrap() (*Trap, error) {
+// GetSubmissionURL returns submission url for circonus
+func (cm *CheckManager) GetSubmissionURL() (*Trap, error) {
 	if cm.trapURL == "" {
-		if err := cm.initializeTrapURL(); err != nil {
-			return nil, err
-		}
+		return nil, fmt.Errorf("[ERROR] no submission url currently available")
+		// if err := cm.initializeTrapURL(); err != nil {
+		// 	return nil, err
+		// }
 	}
 
 	trap := &Trap{}
@@ -327,7 +401,9 @@ func (cm *CheckManager) GetTrap() (*Trap, error) {
 
 	if u.Scheme == "https" {
 		if cm.certPool == nil {
-			cm.loadCACert()
+			if err := cm.loadCACert(); err != nil {
+				return nil, err
+			}
 		}
 		t := &tls.Config{
 			RootCAs: cm.certPool,
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go
index 8f03d4476..eb8603a84 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go
@@ -10,12 +10,18 @@ import (
 
 // IsMetricActive checks whether a given metric name is currently active(enabled)
 func (cm *CheckManager) IsMetricActive(name string) bool {
+	cm.availableMetricsmu.Lock()
+	defer cm.availableMetricsmu.Unlock()
+
 	active, _ := cm.availableMetrics[name]
 	return active
 }
 
 // ActivateMetric determines if a given metric should be activated
 func (cm *CheckManager) ActivateMetric(name string) bool {
+	cm.availableMetricsmu.Lock()
+	defer cm.availableMetricsmu.Unlock()
+
 	active, exists := cm.availableMetrics[name]
 
 	if !exists {
@@ -29,44 +35,101 @@ func (cm *CheckManager) ActivateMetric(name string) bool {
 	return false
 }
 
-// AddNewMetrics updates a check bundle with new metrics
-func (cm *CheckManager) AddNewMetrics(newMetrics map[string]*api.CheckBundleMetric) {
-	// only if check manager is enabled
-	if !cm.enabled {
-		return
+// AddMetricTags updates check bundle metrics with tags
+func (cm *CheckManager) AddMetricTags(metricName string, tags 
[]string, appendTags bool) bool { + tagsUpdated := false + + if appendTags && len(tags) == 0 { + return tagsUpdated + } + + currentTags, exists := cm.metricTags[metricName] + if !exists { + foundMetric := false + + if cm.checkBundle != nil { + for _, metric := range cm.checkBundle.Metrics { + if metric.Name == metricName { + foundMetric = true + currentTags = metric.Tags + break + } + } + } + + if !foundMetric { + currentTags = []string{} + } + } + + action := "" + if appendTags { + numNewTags := countNewTags(currentTags, tags) + if numNewTags > 0 { + action = "Added" + currentTags = append(currentTags, tags...) + tagsUpdated = true + } + } else { + if len(tags) != len(currentTags) { + action = "Set" + currentTags = tags + tagsUpdated = true + } else { + numNewTags := countNewTags(currentTags, tags) + if numNewTags > 0 { + action = "Set" + currentTags = tags + tagsUpdated = true + } + } + } + + if tagsUpdated { + cm.metricTags[metricName] = currentTags + } + + if cm.Debug && action != "" { + cm.Log.Printf("[DEBUG] %s metric tag(s) %s %v\n", action, metricName, tags) } - // only if checkBundle has been populated - if cm.checkBundle == nil { - return + return tagsUpdated +} + +// addNewMetrics updates a check bundle with new metrics +func (cm *CheckManager) addNewMetrics(newMetrics map[string]*api.CheckBundleMetric) bool { + updatedCheckBundle := false + + if cm.checkBundle == nil || len(newMetrics) == 0 { + return updatedCheckBundle } - newCheckBundle := cm.checkBundle - numCurrMetrics := len(newCheckBundle.Metrics) + cm.cbmu.Lock() + defer cm.cbmu.Unlock() + + numCurrMetrics := len(cm.checkBundle.Metrics) numNewMetrics := len(newMetrics) - if numCurrMetrics+numNewMetrics >= cap(newCheckBundle.Metrics) { + if numCurrMetrics+numNewMetrics >= cap(cm.checkBundle.Metrics) { nm := make([]api.CheckBundleMetric, numCurrMetrics+numNewMetrics) - copy(nm, newCheckBundle.Metrics) - newCheckBundle.Metrics = nm + copy(nm, cm.checkBundle.Metrics) + cm.checkBundle.Metrics = nm } - newCheckBundle.Metrics = newCheckBundle.Metrics[0 : numCurrMetrics+numNewMetrics] + cm.checkBundle.Metrics = cm.checkBundle.Metrics[0 : numCurrMetrics+numNewMetrics] i := 0 for _, metric := range newMetrics { - newCheckBundle.Metrics[numCurrMetrics+i] = *metric + cm.checkBundle.Metrics[numCurrMetrics+i] = *metric i++ + updatedCheckBundle = true } - checkBundle, err := cm.apih.UpdateCheckBundle(newCheckBundle) - if err != nil { - cm.Log.Printf("[ERROR] updating check bundle with new metrics %v", err) - return + if updatedCheckBundle { + cm.forceCheckUpdate = true } - cm.checkBundle = checkBundle - cm.inventoryMetrics() + return updatedCheckBundle } // inventoryMetrics creates list of active metrics in check bundle @@ -75,5 +138,35 @@ func (cm *CheckManager) inventoryMetrics() { for _, metric := range cm.checkBundle.Metrics { availableMetrics[metric.Name] = metric.Status == "active" } + cm.availableMetricsmu.Lock() cm.availableMetrics = availableMetrics + cm.availableMetricsmu.Unlock() +} + +// countNewTags returns a count of new tags which do not exist in the current list of tags +func countNewTags(currTags []string, newTags []string) int { + if len(newTags) == 0 { + return 0 + } + + if len(currTags) == 0 { + return len(newTags) + } + + newTagCount := 0 + + for _, newTag := range newTags { + found := false + for _, currTag := range currTags { + if newTag == currTag { + found = true + break + } + } + if !found { + newTagCount++ + } + } + + return newTagCount } diff --git 
a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
index 34166c007..ba73ae625 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
@@ -34,6 +34,7 @@ import (
 	"io/ioutil"
 	"log"
 	"os"
+	"strconv"
 	"sync"
 	"time"
 
@@ -47,24 +48,34 @@ const (
 
 // Config options for circonus-gometrics
 type Config struct {
-	Log   *log.Logger
-	Debug bool
+	Log             *log.Logger
+	Debug           bool
+	ResetCounters   string // reset/delete counters on flush (default true)
+	ResetGauges     string // reset/delete gauges on flush (default true)
+	ResetHistograms string // reset/delete histograms on flush (default true)
+	ResetText       string // reset/delete text on flush (default true)
 
 	// API, Check and Broker configuration options
 	CheckManager checkmgr.Config
 
-	// how frequenly to submit metrics to Circonus, default 10 seconds
+	// how frequently to submit metrics to Circonus, default 10 seconds.
+	// Set to 0 to disable automatic flushes and call Flush manually.
 	Interval string
 }
 
 // CirconusMetrics state
 type CirconusMetrics struct {
-	Log           *log.Logger
-	Debug         bool
-	flushInterval time.Duration
-	flushing      bool
-	flushmu       sync.Mutex
-	check         *checkmgr.CheckManager
+	Log   *log.Logger
+	Debug bool
+
+	resetCounters   bool
+	resetGauges     bool
+	resetHistograms bool
+	resetText       bool
+	flushInterval   time.Duration
+	flushing        bool
+	flushmu         sync.Mutex
+	check           *checkmgr.CheckManager
 
 	counters map[string]uint64
 	cm       sync.Mutex
@@ -90,9 +101,14 @@ type CirconusMetrics struct {
 
 // NewCirconusMetrics returns a CirconusMetrics instance
 func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
+	return New(cfg)
+}
+
+// New returns a CirconusMetrics instance
+func New(cfg *Config) (*CirconusMetrics, error) {
 	if cfg == nil {
-		return nil, errors.New("Invalid configuration (nil).")
+		return nil, errors.New("invalid configuration (nil)")
 	}
 
 	cm := &CirconusMetrics{
@@ -105,55 +121,107 @@ func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
 		textFuncs: make(map[string]func() string),
 	}
 
-	cm.Debug = cfg.Debug
-	if cm.Debug {
-		if cfg.Log == nil {
+	// Logging
+	{
+		cm.Debug = cfg.Debug
+		cm.Log = cfg.Log
+
+		if cm.Debug && cm.Log == nil {
 			cm.Log = log.New(os.Stderr, "", log.LstdFlags)
-		} else {
-			cm.Log = cfg.Log
+		}
+		if cm.Log == nil {
+			cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
 		}
 	}
-	if cm.Log == nil {
-		cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+
+	// Flush Interval
+	{
+		fi := defaultFlushInterval
+		if cfg.Interval != "" {
+			fi = cfg.Interval
+		}
+
+		dur, err := time.ParseDuration(fi)
+		if err != nil {
+			return nil, err
+		}
+		cm.flushInterval = dur
 	}
 
-	fi := defaultFlushInterval
-	if cfg.Interval != "" {
-		fi = cfg.Interval
+	// metric resets
+
+	cm.resetCounters = true
+	if cfg.ResetCounters != "" {
+		setting, err := strconv.ParseBool(cfg.ResetCounters)
+		if err != nil {
+			return nil, err
+		}
+		cm.resetCounters = setting
 	}
 
-	dur, err := time.ParseDuration(fi)
-	if err != nil {
-		return nil, err
+	cm.resetGauges = true
+	if cfg.ResetGauges != "" {
+		setting, err := strconv.ParseBool(cfg.ResetGauges)
+		if err != nil {
+			return nil, err
+		}
+		cm.resetGauges = setting
 	}
-	cm.flushInterval = dur
 
-	cfg.CheckManager.Debug = cm.Debug
-	cfg.CheckManager.Log = cm.Log
+	cm.resetHistograms = true
+	if cfg.ResetHistograms != "" {
+		setting, err := strconv.ParseBool(cfg.ResetHistograms)
+		if err != nil {
+			return nil, err
+		}
+		
cm.resetHistograms = setting + } - check, err := checkmgr.NewCheckManager(&cfg.CheckManager) - if err != nil { - return nil, err + cm.resetText = true + if cfg.ResetText != "" { + setting, err := strconv.ParseBool(cfg.ResetText) + if err != nil { + return nil, err + } + cm.resetText = setting } - cm.check = check - if _, err := cm.check.GetTrap(); err != nil { - return nil, err + // check manager + { + cfg.CheckManager.Debug = cm.Debug + cfg.CheckManager.Log = cm.Log + + check, err := checkmgr.New(&cfg.CheckManager) + if err != nil { + return nil, err + } + cm.check = check + } + + // start background initialization + cm.check.Initialize() + + // if automatic flush is enabled, start it. + // note: submit will jettison metrics until initialization has completed. + if cm.flushInterval > time.Duration(0) { + go func() { + for range time.NewTicker(cm.flushInterval).C { + cm.Flush() + } + }() } return cm, nil } -// Start initializes the CirconusMetrics instance based on -// configuration settings and sets the httptrap check url to -// which metrics should be sent. It then starts a perdiodic -// submission process of all metrics collected. +// Start deprecated NOP, automatic flush is started in New if flush interval > 0. func (m *CirconusMetrics) Start() { - go func() { - for _ = range time.NewTicker(m.flushInterval).C { - m.Flush() - } - }() + return +} + +// Ready returns true or false indicating if the check is ready to accept metrics +func (m *CirconusMetrics) Ready() bool { + return m.check.IsReady() } // Flush metrics kicks off the process of sending metrics to Circonus @@ -186,7 +254,7 @@ func (m *CirconusMetrics) Flush() { } if send { output[name] = map[string]interface{}{ - "_type": "n", + "_type": "L", "_value": value, } } @@ -246,7 +314,13 @@ func (m *CirconusMetrics) Flush() { } } - m.submit(output, newMetrics) + if len(output) > 0 { + m.submit(output, newMetrics) + } else { + if m.Debug { + m.Log.Println("[DEBUG] No metrics to send, skipping") + } + } m.flushmu.Lock() m.flushing = false diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go index 2b34961f1..2311b0a41 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go @@ -4,6 +4,8 @@ package circonusgometrics +import "fmt" + // A Counter is a monotonically increasing unsigned integer. // // Use a counter to derive rates (e.g., record total number of requests, derive @@ -40,6 +42,19 @@ func (m *CirconusMetrics) RemoveCounter(metric string) { delete(m.counters, metric) } +// GetCounterTest returns the current value for a counter. (note: it is a function specifically for "testing", disable automatic submission during testing.) 
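+//
+// For example (illustrative only; assumes a *CirconusMetrics named m created
+// with automatic flushing disabled, e.g. Config.Interval set to "0"):
+//
+//	m.Increment("requests")
+//	count, err := m.GetCounterTest("requests") // count == 1 if no flush occurred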
+func (m *CirconusMetrics) GetCounterTest(metric string) (uint64, error) {
+	m.cm.Lock()
+	defer m.cm.Unlock()
+
+	if val, ok := m.counters[metric]; ok {
+		return val, nil
+	}
+
+	return 0, fmt.Errorf("Counter metric '%s' not found", metric)
+
+}
+
 // SetCounterFunc set counter to a function [called at flush interval]
 func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) {
 	m.cfm.Lock()
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go
index b44236959..9dce5cdf5 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go
@@ -32,6 +32,18 @@ func (m *CirconusMetrics) RemoveGauge(metric string) {
 	delete(m.gauges, metric)
 }
 
+// GetGaugeTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.)
+func (m *CirconusMetrics) GetGaugeTest(metric string) (string, error) {
+	m.gm.Lock()
+	defer m.gm.Unlock()
+
+	if val, ok := m.gauges[metric]; ok {
+		return val, nil
+	}
+
+	return "", fmt.Errorf("Gauge metric '%s' not found", metric)
+}
+
 // SetGaugeFunc sets a gauge to a function [called at flush interval]
 func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {
 	m.gfm.Lock()
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
index 59d19e20f..71a05054e 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
@@ -5,6 +5,7 @@
 package circonusgometrics
 
 import (
+	"fmt"
 	"sync"
 
 	"github.com/circonus-labs/circonusllhist"
@@ -29,19 +30,32 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) {
 
 // SetHistogramValue adds a value to a histogram
 func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
-	m.NewHistogram(metric)
+	hist := m.NewHistogram(metric)
 
-	m.histograms[metric].rw.Lock()
-	defer m.histograms[metric].rw.Unlock()
+	m.hm.Lock()
+	hist.rw.Lock()
+	hist.hist.RecordValue(val)
+	hist.rw.Unlock()
+	m.hm.Unlock()
+}
+
+// GetHistogramTest returns the current value for a histogram. (note: it is a function specifically for "testing", disable automatic submission during testing.)
+func (m *CirconusMetrics) GetHistogramTest(metric string) ([]string, error) {
+	m.hm.Lock()
+	defer m.hm.Unlock()
 
-	m.histograms[metric].hist.RecordValue(val)
+	if hist, ok := m.histograms[metric]; ok {
+		return hist.hist.DecStrings(), nil
+	}
+
+	return []string{""}, fmt.Errorf("Histogram metric '%s' not found", metric)
 }
 
 // RemoveHistogram removes a histogram
 func (m *CirconusMetrics) RemoveHistogram(metric string) {
 	m.hm.Lock()
-	defer m.hm.Unlock()
 	delete(m.histograms, metric)
+	m.hm.Unlock()
 }
 
 // NewHistogram returns a histogram instance.
@@ -71,7 +85,6 @@ func (h *Histogram) Name() string {
 // RecordValue records the given value to a histogram instance
 func (h *Histogram) RecordValue(v float64) {
 	h.rw.Lock()
-	defer h.rw.Unlock()
-
 	h.hist.RecordValue(v)
+	h.rw.Unlock()
 }
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/metrics.go
new file mode 100644
index 000000000..85812f195
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/metrics.go
@@ -0,0 +1,15 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package circonusgometrics + +// SetMetricTags sets the tags for the named metric and flags a check update is needed +func (m *CirconusMetrics) SetMetricTags(name string, tags []string) bool { + return m.check.AddMetricTags(name, tags, false) +} + +// AddMetricTags appends tags to any existing tags for the named metric and flags a check update is needed +func (m *CirconusMetrics) AddMetricTags(name string, tags []string) bool { + return m.check.AddMetricTags(name, tags, true) +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go index abeb73006..3b0c0e0df 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "io/ioutil" "log" "net" @@ -20,13 +21,19 @@ import ( ) func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) { - if len(newMetrics) > 0 { - m.check.AddNewMetrics(newMetrics) + + // if there is nowhere to send metrics to, just return. + if !m.check.IsReady() { + m.Log.Printf("[WARN] check not ready, skipping metric submission") + return } + // update check if there are any new metrics or, if metric tags have been added since last submit + m.check.UpdateCheck(newMetrics) + str, err := json.Marshal(output) if err != nil { - m.Log.Printf("[ERROR] marshling output %+v", err) + m.Log.Printf("[ERROR] marshaling output %+v", err) return } @@ -42,7 +49,7 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s } func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { - trap, err := m.check.GetTrap() + trap, err := m.check.GetSubmissionURL() if err != nil { return 0, err } @@ -53,8 +60,32 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { if err != nil { return 0, err } + req.Header.Add("Content-Type", "application/json") req.Header.Add("Accept", "application/json") + // keep last HTTP error in the event of retry failure + var lastHTTPError error + retryPolicy := func(resp *http.Response, err error) (bool, error) { + if err != nil { + lastHTTPError = err + return true, err + } + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. 
+		if resp.StatusCode == 0 || resp.StatusCode >= 500 {
+			body, readErr := ioutil.ReadAll(resp.Body)
+			if readErr != nil {
+				lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr)
+			} else {
+				lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body))
+			}
+			return true, nil
+		}
+		return false, nil
+	}
+
 	client := retryablehttp.NewClient()
 	if trap.URL.Scheme == "https" {
 		client.HTTPClient.Transport = &http.Transport{
@@ -82,10 +113,17 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
 			DisableCompression: true,
 		}
 	}
-	client.RetryWaitMin = 10 * time.Millisecond
-	client.RetryWaitMax = 50 * time.Millisecond
+	client.RetryWaitMin = 1 * time.Second
+	client.RetryWaitMax = 5 * time.Second
 	client.RetryMax = 3
-	client.Logger = m.Log
+	// retryablehttp accepts either a logger or no logger,
+	// and it emits everything as [DEBUG] messages
+	if m.Debug {
+		client.Logger = m.Log
+	} else {
+		client.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
+	}
+	client.CheckRetry = retryPolicy
 
 	attempts := -1
 	client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
@@ -94,6 +132,9 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
 
 	resp, err := client.Do(req)
 	if err != nil {
+		if lastHTTPError != nil {
+			return 0, fmt.Errorf("[ERROR] submitting: %+v %+v", err, lastHTTPError)
+		}
 		if attempts == client.RetryMax {
 			m.check.RefreshTrap()
 		}
@@ -107,9 +148,8 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
 	}
 
 	var response map[string]interface{}
-	err = json.Unmarshal(body, &response)
-	if err != nil {
-		m.Log.Printf("[ERROR] parsing body, proceeding. %s\n", err)
+	if err := json.Unmarshal(body, &response); err != nil {
+		m.Log.Printf("[ERROR] parsing body, proceeding. %v (%s)\n", err, body)
 	}
 
 	if resp.StatusCode != 200 {
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go
index d5cc7d56c..4428e8985 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/util.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/util.go
@@ -42,59 +42,103 @@ func (m *CirconusMetrics) Reset() {
 
 // snapshot returns a copy of the values of all registered counters and gauges.
func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { + c = m.snapCounters() + g = m.snapGauges() + h = m.snapHistograms() + t = m.snapText() + + return +} + +func (m *CirconusMetrics) snapCounters() map[string]uint64 { m.cm.Lock() defer m.cm.Unlock() - m.cfm.Lock() defer m.cfm.Unlock() - m.gm.Lock() - defer m.gm.Unlock() - - m.gfm.Lock() - defer m.gfm.Unlock() - - m.hm.Lock() - defer m.hm.Unlock() + c := make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) - m.tm.Lock() - defer m.tm.Unlock() - - m.tfm.Lock() - defer m.tfm.Unlock() - - c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) for n, v := range m.counters { c[n] = v } + if m.resetCounters && len(c) > 0 { + m.counters = make(map[string]uint64) + } for n, f := range m.counterFuncs { c[n] = f() } + if m.resetCounters && len(c) > 0 { + m.counterFuncs = make(map[string]func() uint64) + } + + return c +} + +func (m *CirconusMetrics) snapGauges() map[string]string { + m.gm.Lock() + defer m.gm.Unlock() + m.gfm.Lock() + defer m.gfm.Unlock() + + g := make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) - //g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs)) - g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) for n, v := range m.gauges { g[n] = v } + if m.resetGauges && len(g) > 0 { + m.gauges = make(map[string]string) + } for n, f := range m.gaugeFuncs { g[n] = m.gaugeValString(f()) } + if m.resetGauges && len(g) > 0 { + m.gaugeFuncs = make(map[string]func() int64) + } + + return g +} + +func (m *CirconusMetrics) snapHistograms() map[string]*circonusllhist.Histogram { + m.hm.Lock() + defer m.hm.Unlock() + + h := make(map[string]*circonusllhist.Histogram, len(m.histograms)) - h = make(map[string]*circonusllhist.Histogram, len(m.histograms)) for n, hist := range m.histograms { + hist.rw.Lock() h[n] = hist.hist.CopyAndReset() + hist.rw.Unlock() } + if m.resetHistograms && len(h) > 0 { + m.histograms = make(map[string]*Histogram) + } + + return h +} + +func (m *CirconusMetrics) snapText() map[string]string { + m.tm.Lock() + defer m.tm.Unlock() + m.tfm.Lock() + defer m.tfm.Unlock() + + t := make(map[string]string, len(m.text)+len(m.textFuncs)) - t = make(map[string]string, len(m.text)+len(m.textFuncs)) for n, v := range m.text { t[n] = v } + if m.resetText && len(t) > 0 { + m.text = make(map[string]string) + } for n, f := range m.textFuncs { t[n] = f() } + if m.resetText && len(t) > 0 { + m.textFuncs = make(map[string]func() string) + } - return + return t } diff --git a/vendor/modules.txt b/vendor/modules.txt index b5ee9a945..47c0970bc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,9 +1,10 @@ # github.com/armon/go-proxyproto v0.0.0-20150207025855-609d6338d3a7 github.com/armon/go-proxyproto -# github.com/circonus-labs/circonus-gometrics v0.0.0-20160830164725-f8c68ed96a06 +# github.com/circonus-labs/circonus-gometrics v1.0.0 github.com/circonus-labs/circonus-gometrics github.com/circonus-labs/circonus-gometrics/api github.com/circonus-labs/circonus-gometrics/checkmgr +github.com/circonus-labs/circonus-gometrics/api/config # github.com/circonus-labs/circonusllhist v0.0.0-20160526043813-d724266ae527 github.com/circonus-labs/circonusllhist # github.com/cyberdelia/go-metrics-graphite v0.0.0-20150826032200-b8345b7f01d5 From f36bab4937012c71ffcd405b0b50c9562471acac Mon Sep 17 00:00:00 2001 From: JeremyWhite Date: Fri, 26 Oct 2018 10:06:13 -0500 Subject: [PATCH 44/44] issue 562 update change log 
glob.matching.disabled
---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 58d06b9c7..57dff8d22 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,7 +48,7 @@
 
 * [Issue #548](https://github.com/fabiolb/fabio/issues/548): Slow glob matching with large number of services
 
-    This patch adds the new `glob.matching.enabled` option which controls whether glob matching is enabled for
+    This patch adds the new `glob.matching.disabled` option which controls whether glob matching is used for
     route lookups. If the number of routes is large then the glob matching can have a performance impact and
     disabling it may help.
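
Note on the option above: with this series applied, the renamed option is set like any other fabio property. A minimal sketch follows, assuming the standard fabio.properties syntax; only the option name `glob.matching.disabled` is confirmed by this patch, the value and comment are illustrative:

    # fabio.properties (illustrative sketch)
    # turn off glob matching for route lookups when hosting
    # a large number of routes, trading flexibility for speed
    glob.matching.disabled = true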
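Note on the retry behavior introduced in submit.go earlier in this series: the custom CheckRetry treats transport errors and 500-range (plus invalid codes such as 0 and 999) as retryable, and everything else as permanent. The standalone sketch below exercises the same policy against hashicorp/go-retryablehttp; it assumes the two-argument CheckRetry signature matching the vendored copy in this patch (newer upstream releases take a context.Context first), and https://example.com/ stands in for a real submission URL:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"net/http"

    	retryablehttp "github.com/hashicorp/go-retryablehttp"
    )

    func main() {
    	// remember the most recent HTTP-level failure so the caller can report it
    	var lastHTTPError error

    	client := retryablehttp.NewClient()
    	client.RetryMax = 3
    	client.CheckRetry = func(resp *http.Response, err error) (bool, error) {
    		if err != nil {
    			lastHTTPError = err
    			return true, err // transport-level error: worth retrying
    		}
    		// Retry 500-range and invalid status codes (0, 999); 4xx are permanent.
    		if resp.StatusCode == 0 || resp.StatusCode >= 500 {
    			body, readErr := ioutil.ReadAll(resp.Body)
    			if readErr != nil {
    				lastHTTPError = fmt.Errorf("last HTTP error: %d %v", resp.StatusCode, readErr)
    			} else {
    				lastHTTPError = fmt.Errorf("last HTTP error: %d %s", resp.StatusCode, body)
    			}
    			return true, nil
    		}
    		return false, nil
    	}

    	if _, err := client.Get("https://example.com/"); err != nil {
    		fmt.Println("request failed:", err, "/", lastHTTPError)
    	}
    }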