Skip to content
This repository has been archived by the owner on Mar 8, 2023. It is now read-only.

Commit

Permalink
Add filtering on server status
Browse files Browse the repository at this point in the history
Allow excluding metrics for server slots depending on their status. Using
this you can avoid reporting metrics for unused server slots when using
dynamic scaling in haproxy.

E.g. you can use

  --haproxy.server-exclude-states='DOWN,MAINT,MAINT (resolution)'

Signed-off-by: Magnus Hyllander <magnus.hyllander@hivestreaming.com>
  • Loading branch information
mhyllander committed May 28, 2020
1 parent db9370a commit e657460
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 35 deletions.
57 changes: 34 additions & 23 deletions haproxy_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,15 +53,19 @@ const (
// pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
// HAProxy 1.7
// pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,agent_status,agent_code,agent_duration,check_desc,agent_desc,check_rise,check_fall,check_health,agent_rise,agent_fall,agent_health,addr,cookie,mode,algo,conn_rate,conn_rate_max,conn_tot,intercepted,dcon,dses
pxnameField = 0
svnameField = 1
typeField = 32
minimumCsvFieldCount = 33
statusField = 17
qtimeMsField = 58
ctimeMsField = 59
rtimeMsField = 60
ttimeMsField = 61

showStatCmd = "show stat\n"
showInfoCmd = "show info\n"
excludedServerStates = ""
showStatCmd = "show stat\n"
showInfoCmd = "show info\n"
)

var (
Expand Down Expand Up @@ -206,11 +210,12 @@ type Exporter struct {
up prometheus.Gauge
totalScrapes, csvParseFailures prometheus.Counter
serverMetrics map[int]*prometheus.Desc
excludedServerStates map[string]struct{}
logger log.Logger
}

// NewExporter returns an initialized Exporter.
func NewExporter(uri string, sslVerify bool, selectedServerMetrics map[int]*prometheus.Desc, timeout time.Duration, logger log.Logger) (*Exporter, error) {
func NewExporter(uri string, sslVerify bool, selectedServerMetrics map[int]*prometheus.Desc, excludedServerStates string, timeout time.Duration, logger log.Logger) (*Exporter, error) {
u, err := url.Parse(uri)
if err != nil {
return nil, err
Expand All @@ -228,6 +233,11 @@ func NewExporter(uri string, sslVerify bool, selectedServerMetrics map[int]*prom
return nil, fmt.Errorf("unsupported scheme: %q", u.Scheme)
}

excludedServerStatesMap := map[string]struct{}{}
for _, f := range strings.Split(excludedServerStates, ",") {
excludedServerStatesMap[f] = struct{}{}
}

return &Exporter{
URI: uri,
fetchInfo: fetchInfo,
Expand All @@ -247,8 +257,9 @@ func NewExporter(uri string, sslVerify bool, selectedServerMetrics map[int]*prom
Name: "exporter_csv_parse_failures",
Help: "Number of errors while parsing CSV.",
}),
serverMetrics: selectedServerMetrics,
logger: logger,
serverMetrics: selectedServerMetrics,
excludedServerStates: excludedServerStatesMap,
logger: logger,
}, nil
}

Expand Down Expand Up @@ -410,7 +421,7 @@ func (e *Exporter) parseRow(csvRow []string, ch chan<- prometheus.Metric) {
return
}

pxname, svname, typ := csvRow[0], csvRow[1], csvRow[32]
pxname, svname, status, typ := csvRow[pxnameField], csvRow[svnameField], csvRow[statusField], csvRow[typeField]

const (
frontend = "0"
Expand All @@ -424,15 +435,18 @@ func (e *Exporter) parseRow(csvRow []string, ch chan<- prometheus.Metric) {
case backend:
e.exportCsvFields(backendMetrics, csvRow, ch, pxname)
case server:
e.exportCsvFields(e.serverMetrics, csvRow, ch, pxname, svname)

if _, ok := e.excludedServerStates[status]; !ok {
e.exportCsvFields(e.serverMetrics, csvRow, ch, pxname, svname)
}
}
}

func parseStatusField(value string) int64 {
switch value {
case "UP", "UP 1/3", "UP 2/3", "OPEN", "no check":
case "UP", "UP 1/3", "UP 2/3", "OPEN", "no check", "DRAIN":
return 1
case "DOWN", "DOWN 1/2", "NOLB", "MAINT":
default: //case "DOWN", "DOWN 1/2", "NOLB", "MAINT", "MAINT(via)", "MAINT(resolution)":
return 0
}
return 0
Expand Down Expand Up @@ -481,20 +495,16 @@ func filterServerMetrics(filter string) (map[int]*prometheus.Desc, error) {
return metrics, nil
}

selected := map[int]struct{}{}
for _, f := range strings.Split(filter, ",") {
field, err := strconv.Atoi(f)
if err != nil {
return nil, fmt.Errorf("invalid server metric field number: %v", f)
}
selected[field] = struct{}{}
}

for field, metric := range serverMetrics {
if _, ok := selected[field]; ok {
if metric, ok := serverMetrics[field]; ok {
metrics[field] = metric
}
}

return metrics, nil
}

Expand All @@ -509,13 +519,14 @@ func main() {
https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics.`

var (
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9101").String()
metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
haProxyScrapeURI = kingpin.Flag("haproxy.scrape-uri", "URI on which to scrape HAProxy.").Default("http://localhost/;csv").String()
haProxySSLVerify = kingpin.Flag("haproxy.ssl-verify", "Flag that enables SSL certificate verification for the scrape URI").Default("true").Bool()
haProxyServerMetricFields = kingpin.Flag("haproxy.server-metric-fields", "Comma-separated list of exported server metrics. See http://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1").Default(serverMetrics.String()).String()
haProxyTimeout = kingpin.Flag("haproxy.timeout", "Timeout for trying to get stats from HAProxy.").Default("5s").Duration()
haProxyPidFile = kingpin.Flag("haproxy.pid-file", pidFileHelpText).Default("").String()
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9101").String()
metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
haProxyScrapeURI = kingpin.Flag("haproxy.scrape-uri", "URI on which to scrape HAProxy.").Default("http://localhost/;csv").String()
haProxySSLVerify = kingpin.Flag("haproxy.ssl-verify", "Flag that enables SSL certificate verification for the scrape URI").Default("true").Bool()
haProxyServerMetricFields = kingpin.Flag("haproxy.server-metric-fields", "Comma-separated list of exported server metrics. See http://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1").Default(serverMetrics.String()).String()
haProxyServerExcludeStates = kingpin.Flag("haproxy.server-exclude-states", "Comma-separated list of exported server states to exclude. See https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1, field 17 status").Default(excludedServerStates).String()
haProxyTimeout = kingpin.Flag("haproxy.timeout", "Timeout for trying to get stats from HAProxy.").Default("5s").Duration()
haProxyPidFile = kingpin.Flag("haproxy.pid-file", pidFileHelpText).Default("").String()
)

promlogConfig := &promlog.Config{}
Expand All @@ -533,7 +544,7 @@ func main() {
level.Info(logger).Log("msg", "Starting haproxy_exporter", "version", version.Info())
level.Info(logger).Log("msg", "Build context", "context", version.BuildContext())

exporter, err := NewExporter(*haProxyScrapeURI, *haProxySSLVerify, selectedServerMetrics, *haProxyTimeout, logger)
exporter, err := NewExporter(*haProxyScrapeURI, *haProxySSLVerify, selectedServerMetrics, *haProxyServerExcludeStates, *haProxyTimeout, logger)
if err != nil {
level.Error(logger).Log("msg", "Error creating an exporter", "err", err)
os.Exit(1)
Expand Down
24 changes: 12 additions & 12 deletions haproxy_exporter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ func TestInvalidConfig(t *testing.T) {
h := newHaproxy([]byte("not,enough,fields"))
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())

expectMetrics(t, e, "invalid_config.metrics")
}
Expand All @@ -83,7 +83,7 @@ func TestServerWithoutChecks(t *testing.T) {
h := newHaproxy([]byte("test,127.0.0.1:8080,0,0,0,0,0,0,0,0,,0,,0,0,0,0,no check,1,1,0,0,,,0,,1,1,1,,0,,2,0,,0,,,,0,0,0,0,0,0,0,,,,0,0,,,,,,,,,,,"))
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())

expectMetrics(t, e, "server_without_checks.metrics")
}
Expand All @@ -101,7 +101,7 @@ foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4O
h := newHaproxy([]byte(data))
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())

expectMetrics(t, e, "server_broken_csv.metrics")
}
Expand All @@ -114,7 +114,7 @@ foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,
h := newHaproxy([]byte(data))
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())

expectMetrics(t, e, "older_haproxy_versions.metrics")
}
Expand All @@ -123,7 +123,7 @@ func TestConfigChangeDetection(t *testing.T) {
h := newHaproxy([]byte(""))
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())
ch := make(chan prometheus.Metric)

go func() {
Expand All @@ -150,7 +150,7 @@ func TestDeadline(t *testing.T) {
s.Close()
}()

e, err := NewExporter(s.URL, true, serverMetrics, 1*time.Second, log.NewNopLogger())
e, err := NewExporter(s.URL, true, serverMetrics, excludedServerStates, 1*time.Second, log.NewNopLogger())
if err != nil {
t.Fatal(err)
}
Expand All @@ -162,7 +162,7 @@ func TestNotFound(t *testing.T) {
s := httptest.NewServer(http.NotFoundHandler())
defer s.Close()

e, err := NewExporter(s.URL, true, serverMetrics, 1*time.Second, log.NewNopLogger())
e, err := NewExporter(s.URL, true, serverMetrics, excludedServerStates, 1*time.Second, log.NewNopLogger())
if err != nil {
t.Fatal(err)
}
Expand Down Expand Up @@ -221,7 +221,7 @@ func TestUnixDomain(t *testing.T) {
}
defer srv.Close()

e, err := NewExporter("unix:"+testSocket, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, err := NewExporter("unix:"+testSocket, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())
if err != nil {
t.Fatal(err)
}
Expand All @@ -238,7 +238,7 @@ func TestUnixDomainNotFound(t *testing.T) {
if err := os.Remove(testSocket); err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
e, _ := NewExporter("unix:"+testSocket, true, serverMetrics, 1*time.Second, log.NewNopLogger())
e, _ := NewExporter("unix:"+testSocket, true, serverMetrics, excludedServerStates, 1*time.Second, log.NewNopLogger())
expectMetrics(t, e, "unix_domain_not_found.metrics")
}

Expand Down Expand Up @@ -271,13 +271,13 @@ func TestUnixDomainDeadline(t *testing.T) {
}
}()

e, _ := NewExporter("unix:"+testSocket, true, serverMetrics, 1*time.Second, log.NewNopLogger())
e, _ := NewExporter("unix:"+testSocket, true, serverMetrics, excludedServerStates, 1*time.Second, log.NewNopLogger())

expectMetrics(t, e, "unix_domain_deadline.metrics")
}

func TestInvalidScheme(t *testing.T) {
e, err := NewExporter("gopher://gopher.quux.org", true, serverMetrics, 1*time.Second, log.NewNopLogger())
e, err := NewExporter("gopher://gopher.quux.org", true, serverMetrics, excludedServerStates, 1*time.Second, log.NewNopLogger())
if expect, got := (*Exporter)(nil), e; expect != got {
t.Errorf("expected %v, got %v", expect, got)
}
Expand Down Expand Up @@ -352,7 +352,7 @@ func BenchmarkExtract(b *testing.B) {
h := newHaproxy(config)
defer h.Close()

e, _ := NewExporter(h.URL, true, serverMetrics, 5*time.Second, log.NewNopLogger())
e, _ := NewExporter(h.URL, true, serverMetrics, excludedServerStates, 5*time.Second, log.NewNopLogger())

var before, after runtime.MemStats
runtime.GC()
Expand Down

0 comments on commit e657460

Please sign in to comment.