Permalink
Browse files

initial import

  • Loading branch information...
0 parents commit f0795ef22db8a4ccd2900683aa527af24cd73ac5 @obfuscurity committed May 30, 2012
@@ -0,0 +1,4 @@
+.bundle
+.env
+.foreman
+
@@ -0,0 +1,7 @@
# Gem dependencies for the Backstop metrics proxy.
# NOTE(review): `source :rubygems` is deprecated by newer Bundler releases in
# favor of an explicit URL — confirm before upgrading Bundler (the lockfile
# pins http://rubygems.org/).
source :rubygems

gem 'sinatra'      # web DSL serving the HTTP endpoints
gem 'thin'         # web server (Procfile starts rackup with -s thin)
gem 'json'         # JSON parsing
gem 'yajl-ruby'    # alternative JSON backend
gem 'newrelic_rpm' # monitoring agent (configured in newrelic.yml)
@@ -0,0 +1,30 @@
+GEM
+ remote: http://rubygems.org/
+ specs:
+ daemons (1.1.8)
+ eventmachine (0.12.10)
+ json (1.6.1)
+ newrelic_rpm (3.3.4.1)
+ rack (1.4.1)
+ rack-protection (1.2.0)
+ rack
+ sinatra (1.3.2)
+ rack (~> 1.3, >= 1.3.6)
+ rack-protection (~> 1.2)
+ tilt (~> 1.3, >= 1.3.3)
+ thin (1.3.1)
+ daemons (>= 1.0.9)
+ eventmachine (>= 0.12.6)
+ rack (>= 1.0.0)
+ tilt (1.3.3)
+ yajl-ruby (1.1.0)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ json
+ newrelic_rpm
+ sinatra
+ thin
+ yajl-ruby
26 LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012, Jason Dixon
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * The name Jason Dixon may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL JASON DIXON BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1 @@
+web: bundle exec rackup -p $PORT -s thin
@@ -0,0 +1,69 @@
+# Backstop
+
+Backstop is a simple endpoint for submitting metrics to Graphite. It accepts JSON data via HTTP POST and proxies the data to one or more Carbon/Graphite listeners.
+
+## Usage
+
+### Collectd Metrics
+
+Backstop supports submission of metrics via the Collectd [write_http](http://collectd.org/wiki/index.php/Plugin:Write_HTTP) output plugin. A sample client configuration:
+
+```
+<Plugin write_http>
+ <URL "https://backstop.example.com/collectd">
+ Format "JSON"
+ User ""
+ Password ""
+ </URL>
+</Plugin>
+```
+
+### Custom Metrics
+
+Use the `/publish` endpoint in conjunction with one of the approved `PREFIXES` for submitting metrics to Backstop. In most environments it makes sense to use distinct prefixes for normal metrics (e.g. gauges, counters, etc.) versus annotation (event-style) metrics.
+
+#### Sending Metrics
+
+Here is a basic example for posting an application metric to the `custom` prefix.
+
+```ruby
+RestClient.post("https://backstop.example.com/publish/custom",
+ [{:metric => key, :value => value, :measure_time => Time.now.to_i}].to_json)
+```
+
+#### Sending Annotations
+
+Here is an example for posting a software release announcement to the `note` prefix.
+
+```ruby
+RestClient.post("https://backstop.example.com/publish/note",
+ [{:metric => "foobar.release", :value => "v214", :measure_time => Time.now.to_i}].to_json)
+```
+
+## Deployment
+
+### Local
+
+```bash
+$ export CARBON_URLS=...
+$ export PREFIXES=...
+$ foreman start
+```
+
+### Platform
+
+```bash
+$ heroku create -s cedar
+$ heroku config:add CARBON_URLS=...
+$ heroku config:add PREFIXES=...
+$ git push heroku master
+```
+
+## License
+
+Backstop is distributed under a 3-clause BSD license.
+
+## Thanks
+
+Thanks to Michael Gorsuch (@gorsuch) for his initial work on the collectd parser as part of the original "Mitt" application that preceded Backstop.
+
@@ -0,0 +1,4 @@
# Rack entry point: put ./lib on the load path, then mount the Sinatra app.
lib_dir = File.dirname(__FILE__) + '/lib'
$LOAD_PATH.unshift lib_dir
require "backstop/web"

run Backstop::Application
@@ -0,0 +1,15 @@
+---
+production:
+ agent_enabled: true
+ error_collector:
+ capture_source: true
+ enabled: true
+ ignore_errors: ActionController::RoutingError
+ apdex_t: 0.5
+ ssl: true
+ monitor_mode: true
+ license_key: <%= ENV["NEW_RELIC_LICENSE_KEY"] %>
+ developer_mode: false
+ app_name: <%= ENV["NEW_RELIC_APP_NAME"] %>
+ capture_params: false
+ log_level: info
@@ -0,0 +1,47 @@
+require 'json'
+
class CollectdData
  # Translates one decoded collectd JSON payload (a Hash) into an array of
  # {metric:, value:, ...} hashes ready for Carbon submission.

  # Plugin-specific parsers live in plugins/*.rb; each file reopens this class
  # and defines parse_plugin_<name>.
  # ALL PLUGIN CHECKS ARE EXPECTED TO RETURN AN ARRAY OF HASHES OR AN EMPTY ARRAY
  Dir[File.dirname(__FILE__) + '/plugins/*.rb'].each do |path|
    require "backstop/collectd/plugins/#{File.basename(path, '.rb')}"
  end

  attr_accessor :data

  # data: a Hash decoded from the collectd write_http JSON body.
  def initialize(data)
    self.data = data
  end

  # Plugin-specific metrics, each merged with the shared host metadata.
  def parse
    base = parse_base
    parse_plugin.map { |metric| metric.merge(base) }
  end

  # Host metadata shared by every metric: cloud, slot and id are decoded from
  # the hostname ('DOT'/'DASH' appear to be collectd-safe escapes for '.'/'-').
  def parse_base
    hostname = data['host'].gsub('DOT', '.').gsub('DASH', '-')
    parts = hostname.split('.')
    {
      id: parts.last,
      slot: parts[-2],
      cloud: parts.first(parts.size - 2).join('.'),
      measure_period: (data['interval'] || 10).to_i,
      measure_time: data['time']
    }
  end

  # Dynamic dispatch: when parse_plugin_<plugin> is defined we call it;
  # payloads from unknown plugins yield no metrics.
  def parse_plugin
    handler = :"parse_plugin_#{data['plugin']}"
    respond_to?(handler) ? send(handler) : []
  end

end
@@ -0,0 +1,9 @@
class CollectdData
  # Connection-tracking table size: a single conntrack.connections gauge.
  def parse_plugin_conntrack
    [{ metric: 'conntrack.connections', value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # CPU time per core (plugin_instance) and state (type_instance).
  def parse_plugin_cpu
    core  = data['plugin_instance']
    state = data['type_instance']
    [{ metric: "cpu.#{core}.#{state}", value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Filesystem usage per partition; values arrive as a [used, free] pair.
  def parse_plugin_df
    partition = data['type_instance']
    %w[used free].each_with_index.map do |kind, i|
      { metric: "df.#{partition}.#{kind}", value: data['values'][i] }
    end
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Per-device disk activity; values arrive as a [write, read] pair.
  def parse_plugin_disk
    prefix = "disk.#{data['plugin_instance']}.#{data['type']}"
    %w[write read].each_with_index.map do |direction, i|
      { metric: "#{prefix}.#{direction}", value: data['values'][i] }
    end
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Metrics reported under the "droid" plugin, keyed by type_instance.
  def parse_plugin_droid
    [{ metric: "droid.#{data['type_instance']}", value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Filesystem performance metrics; the name keeps the plugin's own
  # plugin/plugin_instance/type_instance hierarchy intact.
  def parse_plugin_fsperformance
    name = [data['plugin'], data['plugin_instance'], data['type_instance']].join('.')
    [{ metric: name, value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Network interface counters; values arrive as an [in, out] pair.
  def parse_plugin_interface
    prefix = "net.#{data['type_instance']}.#{data['type']}"
    %w[in out].each_with_index.map do |direction, i|
      { metric: "#{prefix}.#{direction}", value: data['values'][i] }
    end
  end
end
@@ -0,0 +1,10 @@
class CollectdData
  # System load averages; values arrive as the [1m, 5m, 15m] triple.
  def parse_plugin_load
    %w[1m 5m 15m].each_with_index.map do |window, i|
      { metric: "load.#{window}", value: data['values'][i] }
    end
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Memory usage keyed by state (type_instance).
  def parse_plugin_memory
    [{ metric: "memory.#{data['type_instance']}", value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # NFS I/O statistics; the name keeps the plugin's own
  # plugin/plugin_instance/type_instance hierarchy intact.
  def parse_plugin_nfsiostat
    name = [data['plugin'], data['plugin_instance'], data['type_instance']].join('.')
    [{ metric: name, value: data['values'][0] }]
  end
end
@@ -0,0 +1,50 @@
class CollectdData
  # Process statistics. Three shapes of payload:
  #   * a named process (plugin_instance set): two-value types emit a pair of
  #     metrics, anything else emits a single metric under the type name;
  #   * fork_rate (no plugin_instance, no type_instance): processes.<type>;
  #   * everything else (ps_state): processes.<type_instance>.
  def parse_plugin_processes
    instance = data['plugin_instance']

    unless instance.empty?
      # Types whose two values carry distinct meanings, in value order.
      pair_suffixes = {
        "ps_count"       => ["num_proc", "num_thread"],
        "ps_disk_ops"    => ["read", "write"],
        "ps_disk_octets" => ["read", "write"],
        "ps_pagefaults"  => ["minor", "major"],
        "ps_cputime"     => ["user", "system"]
      }

      prefix = "#{data['plugin']}.#{instance}.#{data['type']}"
      suffixes = pair_suffixes[data['type']]

      return [{ metric: prefix, value: data['values'][0] }] if suffixes.nil?

      return suffixes.each_with_index.map do |suffix, i|
        { metric: "#{prefix}.#{suffix}", value: data['values'][i] }
      end
    end

    # fork_rate has an empty type_instance; ps_state uses it as the key.
    key = data['type_instance'].empty? ? data['type'] : data['type_instance']
    [{ metric: "processes.#{key}", value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
class CollectdData
  # Swap usage keyed by state (type_instance).
  def parse_plugin_swap
    [{ metric: "swap.#{data['type_instance']}", value: data['values'][0] }]
  end
end
@@ -0,0 +1,9 @@
# NOTE(review): the tcpconns parser below ships fully commented out in the
# initial import — confirm whether tcpconns support is intentionally disabled
# before deleting or re-enabling it.
#class CollectdData
#  # tcpconns stats
#  def parse_plugin_tcpconns
#    [{
#      metric: "tcpconns.#{data['plugin_instance']}.#{data['type_instance']}",
#      value: data['values'][0]
#    }]
#  end
#end
@@ -0,0 +1,13 @@
module Backstop
  # Runtime configuration read from environment variables.
  # Required settings raise at access time when the variable is unset.
  module Config
    # Fetch a required environment variable; raises RuntimeError
    # "missing <key>" when it is not set.
    def self.env!(key)
      ENV.fetch(key) { raise "missing #{key}" }
    end

    # Deployment name (required).
    def self.deploy
      env!("DEPLOY")
    end

    # HTTP listen port as an Integer (required).
    def self.port
      env!("PORT").to_i
    end

    # Carbon endpoints to proxy metrics to, comma-separated (required).
    def self.carbon_urls
      env!("CARBON_URLS").split(",")
    end

    # Approved metric prefixes, comma-separated (required).
    def self.prefixes
      env!("PREFIXES").split(",")
    end

    # Optional devcloud endpoint; nil when unset.
    def self.devcloud_url
      ENV["DEVCLOUD_URL"]
    end
  end
end
Oops, something went wrong.

0 comments on commit f0795ef

Please sign in to comment.