[feature] Add support for Prometheus format
Now we can query server status in Prometheus format, e.g.:
curl 'http://localhost:8081/status?format=prometheus&status=down'
zhouchangxun committed Jun 16, 2020
1 parent 52efc9a commit cdc74da
Showing 2 changed files with 239 additions and 3 deletions.
51 changes: 48 additions & 3 deletions README.md
@@ -48,7 +48,7 @@ Key features:
- Supports health detection for both four-tier and seven-tier back-end servers
- Four-layer supported detection type: `tcp` / `udp` / `http`
- Seven-layer supported detection Type: `http` / `fastcgi`
-- Provide a unified http status query interface, output format: `html` / `json` / `csv`
+- Provide a unified http status query interface, output format: `html` / `json` / `csv` / `prometheus`

Installation
============
@@ -140,7 +140,7 @@ stream {

**status interface**

-One typical output is
+One typical output is (json format):
``` python
root@changxun-PC:~/nginx-dev/ngx_healthcheck_module# curl localhost/status
{"servers": {
@@ -159,6 +159,47 @@ root@changxun-PC:~/nginx-dev/ngx_healthcheck_module# curl localhost/status
}}
root@changxun-PC:~/nginx-dev/ngx_healthcheck_module#
```
or (prometheus format):
``` python
root@changxun-PC:~/nginx-dev/ngx_healthcheck_module# curl 'localhost/status?format=prometheus'
# HELP nginx_upstream_count_total Nginx total number of servers
# TYPE nginx_upstream_count_total gauge
nginx_upstream_count_total 6
# HELP nginx_upstream_count_up Nginx total number of servers that are UP
# TYPE nginx_upstream_count_up gauge
nginx_upstream_count_up 0
# HELP nginx_upstream_count_down Nginx total number of servers that are DOWN
# TYPE nginx_upstream_count_down gauge
nginx_upstream_count_down 6
# HELP nginx_upstream_count_generation Nginx generation
# TYPE nginx_upstream_count_generation gauge
nginx_upstream_count_generation 1
# HELP nginx_upstream_server_rise Nginx rise counter
# TYPE nginx_upstream_server_rise counter
nginx_upstream_server_rise{index="0",upstream_type="http",upstream="http-cluster",name="127.0.0.1:8082",status="down",type="http",port="0"} 0
nginx_upstream_server_rise{index="1",upstream_type="http",upstream="http-cluster",name="127.0.0.2:8082",status="down",type="http",port="0"} 0
nginx_upstream_server_rise{index="1",upstream_type="stream",upstream="tcp-cluster",name="192.168.0.2:22",status="down",type="tcp",port="0"} 0
nginx_upstream_server_rise{index="2",upstream_type="stream",upstream="udp-cluster",name="127.0.0.1:5432",status="down",type="udp",port="0"} 0
nginx_upstream_server_rise{index="4",upstream_type="stream",upstream="http-cluster2",name="127.0.0.1:8082",status="down",type="http",port="0"} 0
nginx_upstream_server_rise{index="5",upstream_type="stream",upstream="http-cluster2",name="127.0.0.2:8082",status="down",type="http",port="0"} 0
# HELP nginx_upstream_server_fall Nginx fall counter
# TYPE nginx_upstream_server_fall counter
nginx_upstream_server_fall{index="0",upstream_type="http",upstream="http-cluster",name="127.0.0.1:8082",status="down",type="http",port="0"} 41
nginx_upstream_server_fall{index="1",upstream_type="http",upstream="http-cluster",name="127.0.0.2:8082",status="down",type="http",port="0"} 42
nginx_upstream_server_fall{index="1",upstream_type="stream",upstream="tcp-cluster",name="192.168.0.2:22",status="down",type="tcp",port="0"} 14
nginx_upstream_server_fall{index="2",upstream_type="stream",upstream="udp-cluster",name="127.0.0.1:5432",status="down",type="udp",port="0"} 40
nginx_upstream_server_fall{index="4",upstream_type="stream",upstream="http-cluster2",name="127.0.0.1:8082",status="down",type="http",port="0"} 40
nginx_upstream_server_fall{index="5",upstream_type="stream",upstream="http-cluster2",name="127.0.0.2:8082",status="down",type="http",port="0"} 43
# HELP nginx_upstream_server_active Nginx active 1 for UP / 0 for DOWN
# TYPE nginx_upstream_server_active gauge
nginx_upstream_server_active{index="0",upstream_type="http",upstream="http-cluster",name="127.0.0.1:8082",type="http",port="0"} 0
nginx_upstream_server_active{index="1",upstream_type="http",upstream="http-cluster",name="127.0.0.2:8082",type="http",port="0"} 0
nginx_upstream_server_active{index="1",upstream_type="stream",upstream="tcp-cluster",name="192.168.0.2:22",type="tcp",port="0"} 0
nginx_upstream_server_active{index="2",upstream_type="stream",upstream="udp-cluster",name="127.0.0.1:5432",type="udp",port="0"} 0
nginx_upstream_server_active{index="4",upstream_type="stream",upstream="http-cluster2",name="127.0.0.1:8082",type="http",port="0"} 0
nginx_upstream_server_active{index="5",upstream_type="stream",upstream="http-cluster2",name="127.0.0.2:8082",type="http",port="0"} 0
root@changxun-PC:~/nginx-dev/ngx_healthcheck_module#
```

[Back to TOC](#table-of-contents)

@@ -210,7 +251,7 @@ stream {
healthcheck
-----------

-`Syntax`: healthcheck_status [html|csv|json]
+`Syntax`: healthcheck_status [html|csv|json|prometheus]

`Default`: healthcheck_status html

@@ -242,6 +283,8 @@ format. You can do like this:

/status?format=json

/status?format=prometheus

At present, you can fetch the list of servers with the same status by
the argument of `status`. For example:

@@ -251,6 +294,8 @@ the argument of `status`. For example:

/status?format=csv&status=up

/status?format=prometheus&status=up


[Back to TOC](#table-of-contents)

191 changes: 191 additions & 0 deletions ngx_healthcheck_status.c
@@ -74,6 +74,9 @@ static void ngx_upstream_check_status_csv_format(ngx_buf_t *b,
    ngx_upstream_check_peers_t *peers, ngx_uint_t flag);
static void ngx_upstream_check_status_json_format(ngx_buf_t *b,
    ngx_upstream_check_peers_t *peers, ngx_uint_t flag);
static void ngx_http_upstream_check_status_prometheus_format(ngx_buf_t *b,
    ngx_upstream_check_peers_t *peers, ngx_uint_t flag);

static ngx_check_status_conf_t *ngx_http_get_check_status_format_conf(
    ngx_str_t *str);

@@ -197,6 +200,10 @@ static ngx_check_status_conf_t ngx_check_status_formats[] = {
ngx_string("application/json"), // RFC 4627
ngx_upstream_check_status_json_format },

{ ngx_string("prometheus"),
ngx_string("text/plain"),
ngx_http_upstream_check_status_prometheus_format },

{ ngx_null_string, ngx_null_string, NULL }
};
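For context, here is a minimal sketch of how a `format` query value could be matched against this table. The real `ngx_http_get_check_status_format_conf` body is elided from this diff (its definition starts near the end of the file), so the `format` field name and the scan-to-null-entry pattern below are assumptions for illustration only:

```c
/* Sketch only: assumed lookup over ngx_check_status_formats, matching the
 * requested name case-insensitively until the ngx_null_string terminator.
 * The field name "format" is an assumption, not confirmed by this diff. */
static ngx_check_status_conf_t *
lookup_format_conf_sketch(ngx_str_t *str)
{
    ngx_check_status_conf_t  *conf;

    for (conf = ngx_check_status_formats; conf->format.len != 0; conf++) {
        if (str->len == conf->format.len
            && ngx_strncasecmp(str->data, conf->format.data, str->len) == 0)
        {
            return conf;  /* e.g. "prometheus" -> text/plain + its handler */
        }
    }

    return NULL;  /* unknown format */
}
```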

@@ -744,6 +751,190 @@ ngx_upstream_check_status_json_format(ngx_buf_t *b,
"}}\n");
}


static void
ngx_http_upstream_check_status_prometheus_format(ngx_buf_t *b,
    ngx_upstream_check_peers_t *peers, ngx_uint_t flag)
{
    ngx_uint_t                   count, upCount, downCount, i, j;
    ngx_upstream_check_peer_t   *peer;
    ngx_str_t                    upstream_type[2] = {ngx_string("http"), ngx_string("stream")};
    ngx_upstream_check_peers_t  *upstream_peers[2] = {http_peers_ctx, stream_peers_ctx};

    /* 1. summary: totals across both the http and stream peer lists */
    upCount = 0;
    downCount = 0;
    count = 0;
    for (j = 0; j < 2; j++) {
        peers = upstream_peers[j];
        peer = peers->peers.elts;

        for (i = 0; i < peers->peers.nelts; i++) {
            /*
            if (peer[i].delete) {
                continue;
            }
            */
            /* honor the ?status=up / ?status=down filter */
            if (flag & NGX_CHECK_STATUS_DOWN) {
                if (!peer[i].shm->down) {
                    continue;
                }
            } else if (flag & NGX_CHECK_STATUS_UP) {
                if (peer[i].shm->down) {
                    continue;
                }
            }

            count++;
            if (peer[i].shm->down) {
                downCount++;
            } else {
                upCount++;
            }
        }
    }
    /* %ui formats an ngx_uint_t (nginx ngx_snprintf extension) */
    b->last = ngx_snprintf(b->last, b->end - b->last,
        "# HELP nginx_upstream_count_total Nginx total number of servers\n"
        "# TYPE nginx_upstream_count_total gauge\n"
        "nginx_upstream_count_total %ui\n"
        "# HELP nginx_upstream_count_up Nginx total number of servers that are UP\n"
        "# TYPE nginx_upstream_count_up gauge\n"
        "nginx_upstream_count_up %ui\n"
        "# HELP nginx_upstream_count_down Nginx total number of servers that are DOWN\n"
        "# TYPE nginx_upstream_count_down gauge\n"
        "nginx_upstream_count_down %ui\n"
        "# HELP nginx_upstream_count_generation Nginx generation\n"
        "# TYPE nginx_upstream_count_generation gauge\n"
        "nginx_upstream_count_generation %ui\n",
        count,
        upCount,
        downCount,
        ngx_stream_upstream_check_shm_generation);

    /* 2. nginx_upstream_server_rise */
    b->last = ngx_snprintf(b->last, b->end - b->last,
        "# HELP nginx_upstream_server_rise Nginx rise counter\n"
        "# TYPE nginx_upstream_server_rise counter\n");

    for (j = 0; j < 2; j++) {
        peers = upstream_peers[j];
        peer = peers->peers.elts;

        for (i = 0; i < peers->peers.nelts; i++) {
            /*
            if (peer[i].delete) {
                continue;
            }
            */
            if (flag & NGX_CHECK_STATUS_DOWN) {
                if (!peer[i].shm->down) {
                    continue;
                }
            } else if (flag & NGX_CHECK_STATUS_UP) {
                if (peer[i].shm->down) {
                    continue;
                }
            }

            /* %V formats an ngx_str_t * (nginx ngx_snprintf extension) */
            b->last = ngx_snprintf(b->last, b->end - b->last,
                "nginx_upstream_server_rise{index=\"%ui\",upstream_type=\"%V\",upstream=\"%V\",name=\"%V\",status=\"%s\",type=\"%V\",port=\"%ui\"} %ui\n",
                i,
                &upstream_type[j],
                peer[i].upstream_name,
                &peer[i].peer_addr->name,
                peer[i].shm->down ? "down" : "up",
                &peer[i].conf->check_type_conf->name,
                peer[i].conf->port,
                peer[i].shm->rise_count);
        }
    }

    /* 3. nginx_upstream_server_fall */
    b->last = ngx_snprintf(b->last, b->end - b->last,
        "# HELP nginx_upstream_server_fall Nginx fall counter\n"
        "# TYPE nginx_upstream_server_fall counter\n");
    for (j = 0; j < 2; j++) {
        peers = upstream_peers[j];
        peer = peers->peers.elts;

        for (i = 0; i < peers->peers.nelts; i++) {
            /*
            if (peer[i].delete) {
                continue;
            }
            */
            if (flag & NGX_CHECK_STATUS_DOWN) {
                if (!peer[i].shm->down) {
                    continue;
                }
            } else if (flag & NGX_CHECK_STATUS_UP) {
                if (peer[i].shm->down) {
                    continue;
                }
            }

            b->last = ngx_snprintf(b->last, b->end - b->last,
                "nginx_upstream_server_fall{index=\"%ui\",upstream_type=\"%V\",upstream=\"%V\",name=\"%V\",status=\"%s\",type=\"%V\",port=\"%ui\"} %ui\n",
                i,
                &upstream_type[j],
                peer[i].upstream_name,
                &peer[i].peer_addr->name,
                peer[i].shm->down ? "down" : "up",
                &peer[i].conf->check_type_conf->name,
                peer[i].conf->port,
                peer[i].shm->fall_count);
        }
    }

    /* 4. nginx_upstream_server_active */
    b->last = ngx_snprintf(b->last, b->end - b->last,
        "# HELP nginx_upstream_server_active Nginx active 1 for UP / 0 for DOWN\n"
        "# TYPE nginx_upstream_server_active gauge\n");
    for (j = 0; j < 2; j++) {
        peers = upstream_peers[j];
        peer = peers->peers.elts;

        for (i = 0; i < peers->peers.nelts; i++) {
            /*
            if (peer[i].delete) {
                continue;
            }
            */
            if (flag & NGX_CHECK_STATUS_DOWN) {
                if (!peer[i].shm->down) {
                    continue;
                }
            } else if (flag & NGX_CHECK_STATUS_UP) {
                if (peer[i].shm->down) {
                    continue;
                }
            }

            b->last = ngx_snprintf(b->last, b->end - b->last,
                "nginx_upstream_server_active{index=\"%ui\",upstream_type=\"%V\",upstream=\"%V\",name=\"%V\",type=\"%V\",port=\"%ui\"} %ui\n",
                i,
                &upstream_type[j],
                peer[i].upstream_name,
                &peer[i].peer_addr->name,
                &peer[i].conf->check_type_conf->name,
                peer[i].conf->port,
                peer[i].shm->down ? 0 : 1);
        }
    }
}
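The up/down filtering block is repeated verbatim in all four loops above. A hypothetical refactor (not part of this commit) could hoist it into a small predicate, sketched here using the module's existing types:

```c
/* Hypothetical helper (not in this commit): returns 1 when a peer should be
 * skipped under the ?status=up / ?status=down filter, mirroring the block
 * duplicated in the four loops above. */
static ngx_uint_t
ngx_upstream_check_peer_filtered(ngx_upstream_check_peer_t *peer,
    ngx_uint_t flag)
{
    if ((flag & NGX_CHECK_STATUS_DOWN) && !peer->shm->down) {
        return 1;
    }

    if ((flag & NGX_CHECK_STATUS_UP) && peer->shm->down) {
        return 1;
    }

    return 0;
}
```

Each loop body would then reduce to `if (ngx_upstream_check_peer_filtered(&peer[i], flag)) { continue; }` before emitting its metric line.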

static ngx_check_status_conf_t *
ngx_http_get_check_status_format_conf(ngx_str_t *str)
{
