Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 39 additions & 1 deletion collector/indices.go
Original file line number Diff line number Diff line change
Expand Up @@ -410,7 +410,7 @@ var (
nil,
)

indicesShardsLabels = []string{"index", "shard", "node", "primary", "cluster"}
indicesShardsLabels = []string{"index", "shard", "node", "node_name", "primary", "cluster"}

indicesShardDocs = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "indices", "shards_docs"),
Expand Down Expand Up @@ -621,6 +621,26 @@ func (i *Indices) fetchAndDecodeIndexStats(ctx context.Context) (indexStatsRespo
return isr, nil
}

// fetchAndDecodeNodes calls the Elasticsearch /_nodes endpoint, restricted
// via filter_path to just the node names, and decodes the reply into a
// nodesResponse (node ID -> node name).
func (i *Indices) fetchAndDecodeNodes(ctx context.Context) (nodesResponse, error) {
	var result nodesResponse

	endpoint := i.url.ResolveReference(&url.URL{Path: "/_nodes"})
	params := endpoint.Query()
	// Only the per-node "name" field is needed; trim the payload server-side.
	params.Set("filter_path", "nodes.*.name")
	endpoint.RawQuery = params.Encode()

	body, err := getURL(ctx, i.client, i.logger, endpoint.String())
	if err != nil {
		return result, err
	}

	err = json.Unmarshal(body, &result)
	return result, err
}

// getClusterName returns the cluster name. If no clusterinfo retriever is
// attached (e.g. /probe mode) it performs a lightweight call to the root
// endpoint once and caches the result.
Expand Down Expand Up @@ -659,6 +679,17 @@ func (i *Indices) Collect(ch chan<- prometheus.Metric) {
return
}

var nodesResp nodesResponse
if i.shards {
nodesResp, err = i.fetchAndDecodeNodes(ctx)
if err != nil {
i.logger.Warn(
"failed to fetch and decode nodes",
"err", err,
)
}
}

// Alias stats
if i.aliases {
for indexName, aliases := range indexStatsResp.Aliases {
Expand Down Expand Up @@ -1272,13 +1303,18 @@ func (i *Indices) Collect(ch chan<- prometheus.Metric) {
if i.shards {
for shardNumber, shards := range indexStats.Shards {
for _, shard := range shards {
nodeName := shard.Routing.Node
if node, ok := nodesResp.Nodes[shard.Routing.Node]; ok {
nodeName = node.Name
}
ch <- prometheus.MustNewConstMetric(
indicesShardDocs,
prometheus.GaugeValue,
float64(shard.Docs.Count),
indexName,
shardNumber,
shard.Routing.Node,
nodeName,
strconv.FormatBool(shard.Routing.Primary),
i.getClusterName(),
)
Expand All @@ -1289,6 +1325,7 @@ func (i *Indices) Collect(ch chan<- prometheus.Metric) {
indexName,
shardNumber,
shard.Routing.Node,
nodeName,
strconv.FormatBool(shard.Routing.Primary),
i.getClusterName(),
)
Expand All @@ -1299,6 +1336,7 @@ func (i *Indices) Collect(ch chan<- prometheus.Metric) {
indexName,
shardNumber,
shard.Routing.Node,
nodeName,
strconv.FormatBool(shard.Routing.Primary),
i.getClusterName(),
)
Expand Down
7 changes: 7 additions & 0 deletions collector/indices_response.go
Original file line number Diff line number Diff line change
Expand Up @@ -223,3 +223,10 @@ type IndexStatsIndexRecoveryResponse struct {
CurrentAsTarget int64 `json:"current_as_target"`
ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"`
}

// nodesResponse models the subset of the Elasticsearch Nodes API response
// that survives the "nodes.*.name" filter_path: a map keyed by node ID whose
// values carry only the node's human-readable name.
type nodesResponse struct {
	Nodes map[string]struct {
		Name string `json:"name"`
	} `json:"nodes"`
}
39 changes: 26 additions & 13 deletions collector/indices_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2081,22 +2081,22 @@ func TestIndices(t *testing.T) {
elasticsearch_indices_segment_version_map_memory_bytes_total{cluster="unknown_cluster",index="foo_3"} 0
# HELP elasticsearch_indices_shards_docs Count of documents on this shard
# TYPE elasticsearch_indices_shards_docs gauge
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 37
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 2
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 3
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 37
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 2
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 3
elasticsearch_indices_shards_docs{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 0
# HELP elasticsearch_indices_shards_docs_deleted Count of deleted documents on this shard
# TYPE elasticsearch_indices_shards_docs_deleted gauge
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 0
elasticsearch_indices_shards_docs_deleted{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 0
# HELP elasticsearch_indices_shards_store_size_in_bytes Store size of this shard
# TYPE elasticsearch_indices_shards_store_size_in_bytes gauge
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 3.7286036e+07
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 8600
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 12925
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",primary="true",shard="0"} 226
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index=".geoip_databases",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 3.7286036e+07
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_1",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 8600
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_2",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 12925
elasticsearch_indices_shards_store_size_in_bytes{cluster="unknown_cluster",index="foo_3",node="49nZYKtiQdGg7Nl_sVsI1A",node_name="es-node-1",primary="true",shard="0"} 226
# HELP elasticsearch_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes
# TYPE elasticsearch_indices_store_size_bytes_primary gauge
elasticsearch_indices_store_size_bytes_primary{cluster="unknown_cluster",index=".geoip_databases"} 3.7286036e+07
Expand Down Expand Up @@ -2139,6 +2139,17 @@ func TestIndices(t *testing.T) {
}
defer fAlias.Close()

var fNodes io.ReadCloser
fNodes, err = os.Open(path.Join("../fixtures/nodes/", tt.file))
if err != nil {
if os.IsNotExist(err) {
fNodes = io.NopCloser(strings.NewReader("{}"))
} else {
t.Fatal(err)
}
}
defer fNodes.Close()

ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/_all/_stats":
Expand All @@ -2150,6 +2161,8 @@ func TestIndices(t *testing.T) {
}
case "/_alias":
io.Copy(w, fAlias)
case "/_nodes":
io.Copy(w, fNodes)
default:
http.Error(w, "Not Found", http.StatusNotFound)
}
Expand All @@ -2161,7 +2174,7 @@ func TestIndices(t *testing.T) {
t.Fatal(err)
}

c := NewIndices(promslog.NewNopLogger(), http.DefaultClient, u, false, true)
c := NewIndices(promslog.NewNopLogger(), http.DefaultClient, u, tt.shards, true)
if err != nil {
t.Fatal(err)
}
Expand Down
7 changes: 7 additions & 0 deletions fixtures/nodes/7.17.3.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"nodes": {
"49nZYKtiQdGg7Nl_sVsI1A": {
"name": "es-node-1"
}
}
}