This repository has been archived by the owner on Apr 2, 2024. It is now read-only.

Commit

Merge pull request #1492 from 4r9h/issue_1461
Issue 1461
rafrombrc committed Apr 27, 2015
2 parents e4e7758 + b4dbd37 commit a7f8c9f
Showing 5 changed files with 313 additions and 0 deletions.
2 changes: 2 additions & 0 deletions CHANGES.txt
@@ -46,6 +46,8 @@ Features

* Added splitters to the reports displayed by Heka after SIGUSR1 signal.

* Added `graphite` module with helpers for generating Graphite metrics from counters and timeseries data (#1461).

Bug Handling
------------

10 changes: 10 additions & 0 deletions docs/source/sandbox/module.rst
@@ -46,3 +46,13 @@ ElasticSearch Module
:start-after: --[[
:end-before: --]]

.. _sandbox_graphite_module:

Graphite Module
--------------------

.. versionadded:: 0.10

.. include:: ../../../sandbox/lua/modules/graphite.lua
:start-after: --[[
:end-before: --]]
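
For reference, here is a minimal sketch of how a sandbox filter might call these
helpers from its ``timer_event`` (the bucket name, count, and intervals below are
illustrative; the test script added later in this commit does the same thing with
real message data):

.. code-block:: lua

    local graphite = require "graphite"

    function timer_event(ns)
        local now_sec = graphite.ns_to_sec(ns)
        add_to_payload(graphite.count_rate("localhost.nginx.http_200", 4, 10, now_sec))
        add_to_payload(graphite.multi_timeseries_metrics(
            {["localhost.nginx.request_time"] = {0, 15, 30, 45}}, 10, 90, now_sec))
        inject_payload("txt", "statmetric")
    end
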
71 changes: 71 additions & 0 deletions sandbox/lua/lua_sandbox_test.go
@@ -945,6 +945,77 @@ func TestCJson(t *testing.T) {
sb.Destroy("")
}

func TestGraphiteHelpers(t *testing.T) {
	var sbc SandboxConfig
	sbc.ScriptFilename = "./testsupport/graphite.lua"
	sbc.ModuleDirectory = "./modules"
	sbc.MemoryLimit = 100000
	sbc.InstructionLimit = 1000
	sbc.OutputLimit = 8000
	sbc.Config = make(map[string]interface{})

	sb, err := lua.CreateLuaSandbox(&sbc)
	if err != nil {
		t.Errorf("%s", err)
	}

	err = sb.Init("")
	if err != nil {
		t.Errorf("%s", err)
	}

	for i := 0; i < 4; i++ {
		pack := getTestPack()
		pack.Message.SetHostname("localhost")
		pack.Message.SetLogger("GoSpec")

		message.NewIntField(pack.Message, "status", 200, "status")
		message.NewIntField(pack.Message, "request_time", 15*i, "request_time")

		r := sb.ProcessMessage(pack)
		if r != 0 {
			t.Errorf("Graphite returned %d", r)
		}
	}

	injectCount := 0
	sb.InjectMessage(func(payload, payload_type, payload_name string) int {
		graphite_payload := `stats.counters.localhost.nginx.GoSpec.http_200.count 4 0
stats.counters.localhost.nginx.GoSpec.http_200.rate 0.400000 0
stats.timers.localhost.nginx.GoSpec.request_time.count 4 0
stats.timers.localhost.nginx.GoSpec.request_time.count_ps 0.400000 0
stats.timers.localhost.nginx.GoSpec.request_time.lower 0.000000 0
stats.timers.localhost.nginx.GoSpec.request_time.upper 45.000000 0
stats.timers.localhost.nginx.GoSpec.request_time.sum 90.000000 0
stats.timers.localhost.nginx.GoSpec.request_time.mean 22.500000 0
stats.timers.localhost.nginx.GoSpec.request_time.mean_90 22.500000 0
stats.timers.localhost.nginx.GoSpec.request_time.upper_90 45.000000 0
stats.statsd.numStats 2 0
`
		if payload_type != "txt" {
			t.Errorf("Received payload type: %s", payload_type)
		}

		if payload_name != "statmetric" {
			t.Errorf("Received payload name: %s", payload_name)
		}

		if graphite_payload != payload {
			t.Errorf("Received payload: %s", payload)
		}
		injectCount++
		return 0
	})

	sb.TimerEvent(200)

	if injectCount != 1 {
		t.Errorf("Expected exactly one payload injection from timer_event, got %d", injectCount)
	}

	sb.Destroy("")
}

func TestReadNilConfig(t *testing.T) {
var sbc SandboxConfig
sbc.ScriptFilename = "./testsupport/read_config_nil.lua"
182 changes: 182 additions & 0 deletions sandbox/lua/modules/graphite.lua
@@ -0,0 +1,182 @@
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this
-- file, You can obtain one at http://mozilla.org/MPL/2.0/.

--[[
Utility functions for generating metrics that can be sent to Graphite. The
module currently focuses on generating metric strings for counters and
timeseries data.

API
^^^

**count_rate(bucket, count, ticker_interval, now_sec)**

Generates a count and rate metric string for Graphite.

*Arguments*

- bucket - node name under which the metric will be stored.
- count - the counter value.
- ticker_interval - interval, in seconds, used to calculate the rate.
- now_sec - timestamp, in seconds, attached to the metric.

*Return*

String with count and rate metrics for the `stats.counters.<bucket>.count` and
`stats.counters.<bucket>.rate` buckets, stamped with <now_sec>.

**multi_counts_rates(counts, ticker_interval, now_sec)**

Generates a multiline Graphite string with a count and rate metric for each
bucket.

*Arguments*

- counts - table mapping bucket names to their counts, e.g. {bucket1 = 1, bucket2 = 2}.
- ticker_interval - interval, in seconds, used to calculate the rates.
- now_sec - timestamp, in seconds, attached to the metrics.

*Return*

Concatenation of the `count_rate` output for every bucket in the table.

**timeseries_metrics(bucket, times, ticker_interval, percent_thresh, now_sec)**

Generates a multiline Graphite string with statistics for the given timeseries
data.

*Arguments*

- bucket - node name under which the metrics will be stored.
- times - table (array) of time values.
- ticker_interval - interval, in seconds, used to calculate the rate.
- percent_thresh - percentile threshold used for the mean_<thresh> and upper_<thresh> metrics.
- now_sec - timestamp, in seconds, attached to the metrics.

*Return*

Multiline Graphite string with the following metrics:

- stats.timers.<bucket>.count
- stats.timers.<bucket>.count_ps
- stats.timers.<bucket>.lower
- stats.timers.<bucket>.upper
- stats.timers.<bucket>.sum
- stats.timers.<bucket>.mean
- stats.timers.<bucket>.mean_<percent_thresh>
- stats.timers.<bucket>.upper_<percent_thresh>

**multi_timeseries_metrics(timers, ticker_interval, percent_thresh, now_sec)**

Generates a multiline Graphite string with the timeseries statistics for each
bucket.

*Arguments*

- timers - table mapping bucket names to tables of times, e.g. {bucket = {1, 2, 3, 4}}.
- ticker_interval - interval, in seconds, used to calculate the rates.
- percent_thresh - percentile threshold used for the mean_<thresh> and upper_<thresh> metrics.
- now_sec - timestamp, in seconds, attached to the metrics.

*Return*

Concatenation of the `timeseries_metrics` output for every bucket defined in
the timers table.

**ns_to_sec(ns)**

Converts nanoseconds to seconds.

*Arguments*

- ns - timestamp in nanoseconds.

*Return*

Whole seconds (the fractional part is truncated).
--]]

-- Imports
local string = require "string"
local math = require "math"
local table = require "table"

-- Remove external access to contain everything in the module.
local M = {}
setfenv(1, M)

--[[ Public interface --]]
function count_rate(bucket, count, ticker_interval, now_sec)
    return string.format("stats.counters.%s.count %d %d\nstats.counters.%s.rate %f %d\n",
                         bucket, count, now_sec,
                         bucket, count/ticker_interval, now_sec)
end
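
-- For example, with the values produced by the Go test above,
--   count_rate("localhost.nginx.GoSpec.http_200", 4, 10, 0)
-- returns:
--   stats.counters.localhost.nginx.GoSpec.http_200.count 4 0
--   stats.counters.localhost.nginx.GoSpec.http_200.rate 0.400000 0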

function multi_counts_rates(counts, ticker_interval, now_sec)
    local stats = {}
    for bucket, count in pairs(counts) do
        stats[#stats+1] = count_rate(bucket, count, ticker_interval, now_sec)
    end
    return table.concat(stats)
end
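
-- In the Go test above the status_codes table ends up as
-- {["localhost.nginx.GoSpec.http_200"] = 4}, so multi_counts_rates(status_codes, 10, 0)
-- yields exactly the two stats.counters.* lines of the expected payload.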

function timeseries_metrics(bucket, times, ticker_interval, percent_thresh, now_sec)
    local count, min, max, sum, mean, rate, mean_percentile, upper_percentile
    local cumulative, tmp

    count = #times
    if count == 0 then
        min = 0
        max = 0
        sum = 0
        mean = 0
        rate = 0
        mean_percentile = 0
        upper_percentile = 0
    else
        rate = count / ticker_interval
        table.sort(times)
        min = times[1]
        max = times[count]
        mean = min
        local thresh_bound = max

        cumulative = {}
        cumulative[0] = 0
        for i, time in ipairs(times) do
            cumulative[i] = cumulative[i-1] + time
        end

        if count > 1 then
            tmp = ((100 - percent_thresh) / 100) * count
            local num_in_thresh = count - math.floor(tmp+.5)
            if num_in_thresh > 0 then
                mean = cumulative[num_in_thresh] / num_in_thresh
                thresh_bound = times[num_in_thresh]
            else
                mean = min
                thresh_bound = max
            end
        end
        mean_percentile = mean
        upper_percentile = thresh_bound
        sum = cumulative[count]
        mean = sum / count
    end

    return string.format([[stats.timers.%s.count %d %d
stats.timers.%s.count_ps %f %d
stats.timers.%s.lower %f %d
stats.timers.%s.upper %f %d
stats.timers.%s.sum %f %d
stats.timers.%s.mean %f %d
stats.timers.%s.mean_%d %f %d
stats.timers.%s.upper_%d %f %d
]],
        bucket, count, now_sec,
        bucket, rate, now_sec,
        bucket, min, now_sec,
        bucket, max, now_sec,
        bucket, sum, now_sec,
        bucket, mean, now_sec,
        bucket, percent_thresh, mean_percentile, now_sec,
        bucket, percent_thresh, upper_percentile, now_sec)
end
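
-- Worked example, using the data from the Go test above:
--   timeseries_metrics("localhost.nginx.GoSpec.request_time", {0, 15, 30, 45}, 10, 90, 0)
-- sorts the times, builds cumulative sums 0, 15, 45, 90, and with
-- percent_thresh = 90 keeps all four samples (num_in_thresh = 4 - math.floor(0.4 + 0.5) = 4),
-- yielding count 4, count_ps 0.4, lower 0, upper 45, sum 90, mean 22.5,
-- mean_90 22.5 and upper_90 45, which matches the expected payload in the test.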

function multi_timeseries_metrics(timers, ticker_interval, percent_thresh, now_sec)
    local stats = {}
    for bucket, times in pairs(timers) do
        stats[#stats+1] = timeseries_metrics(bucket, times, ticker_interval, percent_thresh, now_sec)
    end
    return table.concat(stats)
end

function ns_to_sec(ns)
    return math.floor(ns / 1e9)
end
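
-- The result is truncated to whole seconds. In the Go test above,
-- sb.TimerEvent(200) passes ns = 200, so ns_to_sec(200) == 0, which is why
-- every line of the expected payload carries the timestamp 0.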

return M
48 changes: 48 additions & 0 deletions sandbox/lua/testsupport/graphite.lua
@@ -0,0 +1,48 @@
require "string"
require "math"
require "table"
require "cjson"

local graphite = require "graphite"

local status_codes = {}
local request_times = {}

local ticker_interval = 10
local percent_thresh = 90

function process_message()
    local hostname = read_message("Hostname")
    local logger = read_message("Logger")
    local status = read_message("Fields[status]")
    local request_time = read_message("Fields[request_time]")

    local bucket = string.format("%s.nginx.%s.http_%d", hostname, logger, status)
    local val = status_codes[bucket] or 0
    status_codes[bucket] = val + 1

    bucket = string.format("%s.nginx.%s.request_time", hostname, logger)
    val = request_times[bucket] or {}
    val[#val+1] = request_time
    request_times[bucket] = val
    return 0
end

function timer_event(ns)
    local now_sec = graphite.ns_to_sec(ns)
    local num_stats = 0

    for bucket, count in pairs(status_codes) do
        num_stats = num_stats + 1
    end
    for bucket, times in pairs(request_times) do
        num_stats = num_stats + 1
    end

    add_to_payload(graphite.multi_counts_rates(status_codes, ticker_interval, now_sec))
    add_to_payload(graphite.multi_timeseries_metrics(request_times, ticker_interval, percent_thresh, now_sec))
    add_to_payload(string.format("stats.statsd.numStats %d %d\n", num_stats, now_sec))

    inject_payload("txt", "statmetric")
end
