Agent release 2.9.3

2 parents 466554d + 6cd2064 commit 11a4d4f5e470002127bcee3369d776c9d05420a6 @bkayser committed Jul 10, 2009
@@ -1,6 +1,7 @@
v2.9.3.
* Fix startup failure in Windows due to memory sampler
* Add JRuby environment information
+
v2.9.2.
* change default apdex_t to 0.5 seconds
* fix bug in deployments introduced by multi_homed setting
@@ -21,7 +21,7 @@
if ! defined?(::NEWRELIC_STARTED)
::NEWRELIC_STARTED = "#{caller.join("\n")}"
- NewRelic::Control.instance.init_plugin(defined?(config) ? {:config => config} : {})
+ NewRelic::Control.instance.init_plugin (defined?(config) ? {:config => config} : {})
else
NewRelic::Control.instance.log.debug "Attempt to initialize the plugin twice!"
NewRelic::Control.instance.log.debug "Original call: \n#{::NEWRELIC_STARTED}"
@@ -445,23 +445,14 @@ def harvest_and_send_slowest_sample
unless @traces.empty?
now = Time.now
log.debug "Sending (#{@traces.length}) transaction traces"
- begin
- # take the traces and prepare them for sending across the
- # wire. This includes gathering SQL explanations, stripping
- # out stack traces, and normalizing SQL. note that we
- # explain only the sql statements whose segments' execution
- # times exceed our threshold (to avoid unnecessary overhead
- # of running explains on fast queries.)
- traces = @traces.collect {|trace| trace.prepare_to_send(:explain_sql => @explain_threshold, :record_sql => @record_sql, :keep_backtraces => true, :explain_enabled => @explain_enabled)}
-
-
- invoke_remote :transaction_sample_data, @agent_id, traces
- rescue PostTooBigException
- # we tried to send too much data, drop the first trace and
- # try again
- @traces.shift
- retry
- end
+
+ # take the traces and prepare them for sending across the wire. This includes
+ # gathering SQL explanations, stripping out stack traces, and normalizing SQL.
+ # note that we explain only the sql statements whose segments' execution times exceed
+ # our threshold (to avoid unnecessary overhead of running explains on fast queries.)
+ traces = @traces.collect {|trace| trace.prepare_to_send(:explain_sql => @explain_threshold, :record_sql => @record_sql, :keep_backtraces => true, :explain_enabled => @explain_enabled)}
+
+ invoke_remote :transaction_sample_data, @agent_id, traces
log.debug "#{now}: sent slowest sample (#{@agent_id}) in #{Time.now - now} seconds"
end
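
The inline comment above notes that explains are run only for SQL segments whose execution time exceeds the explain threshold. A rough sketch of that selection rule, using hypothetical segment and settings objects rather than the agent's real classes:

    # Illustrative only: hypothetical names, not the agent's internals.
    # Run EXPLAIN just for segments slower than the threshold, so fast queries
    # don't pay the cost of an extra round trip to the database.
    ExplainSettings = Struct.new(:explain_threshold, :explain_enabled)

    def segments_to_explain(segments, settings)
      return [] unless settings.explain_enabled
      segments.select { |s| s.sql && s.duration > settings.explain_threshold }
    end

    Segment  = Struct.new(:sql, :duration)
    segments = [Segment.new("SELECT * FROM users", 0.8),
                Segment.new(nil, 1.2),
                Segment.new("SELECT 1", 0.01)]
    segments_to_explain(segments, ExplainSettings.new(0.5, true))
    # => only the first segment: it has SQL and ran longer than 0.5s
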
@@ -481,57 +472,38 @@ def harvest_and_send_errors
@unsent_errors = @error_collector.harvest_errors(@unsent_errors)
if @unsent_errors && @unsent_errors.length > 0
log.debug "Sending #{@unsent_errors.length} errors"
- begin
- invoke_remote :error_data, @agent_id, @unsent_errors
- rescue PostTooBigException
- @unsent_errors.shift
- retry
- end
- # if the remote invocation fails, then we never clear
- # @unsent_errors, and therefore we can re-attempt to send on
- # the next heartbeat. Note the error collector maxes out at
- # 20 instances to prevent leakage
+
+ invoke_remote :error_data, @agent_id, @unsent_errors
+
+ # if the remote invocation fails, then we never clear @unsent_errors,
+ # and therefore we can re-attempt to send on the next heartbeat. Note
+ # the error collector maxes out at 20 instances to prevent leakage
@unsent_errors = []
end
end
-
- def compress_data(object)
- dump = Marshal.dump(object)
-
- # we currently optimize for CPU here since we get roughly a 10x
- # reduction in message size with this, and CPU overhead is at a
- # premium. For extra-large posts, we use the higher compression
- # since otherwise it actually errors out.
-
- dump_size = dump.size
-
- # small payloads don't need compression
- return [dump, 'identity'] if dump_size < 2000
+
+ # send a message via post
+ def invoke_remote(method, *args)
+ # we currently optimize for CPU here since we get roughly a 10x reduction in
+ # message size with this, and CPU overhead is at a premium. If we wanted
+ # to go for higher compression instead, we could use Zlib::BEST_COMPRESSION and
+ # pay a little more CPU.
+ data = Marshal.dump(args)
+ encoding = data.size > 2000 ? 'deflate' : 'identity' # don't compress small payloads
+ post_data = encoding == 'deflate' ? Zlib::Deflate.deflate(data, Zlib::BEST_SPEED) : data
+ http = control.http_connection(collector)
- # medium payloads get fast compression, to save CPU
- # big payloads get all the compression possible, to stay under
- # the 2,000,000 byte post threshold
- compression = dump_size < 2000000 ? Zlib::BEST_SPEED : Zlib::BEST_COMPRESSION
+ uri = "/agent_listener/#{PROTOCOL_VERSION}/#{control.license_key}/#{method}"
+ uri += "?run_id=#{@agent_id}" if @agent_id
- [Zlib::Deflate.deflate(dump, compression), 'deflate']
- end
-
- def check_post_size(post_string)
- # TODO: define this as a config option on the server side
- return if post_string.size < 2000000
- log.warn "Tried to send too much data, retrying with less: #{post_string.size} bytes"
- raise PostTooBigException
- end
-
- def send_request(opts)
- request = Net::HTTP::Post.new(opts[:uri], 'CONTENT-ENCODING' => opts[:encoding], 'ACCEPT-ENCODING' => 'gzip', 'HOST' => opts[:collector].name)
+ request = Net::HTTP::Post.new(uri, 'CONTENT-ENCODING' => encoding, 'ACCEPT-ENCODING' => 'gzip', 'HOST' => collector.name)
request.content_type = "application/octet-stream"
- request.body = opts[:data]
+ request.body = post_data
- log.debug "connect to #{opts[:collector]}#{opts[:uri]}"
+ log.debug "connect to #{collector}#{uri}"
response = nil
- http = control.http_connection(collector)
+
begin
timeout(@request_timeout) do
response = http.request(request)
@@ -541,48 +513,26 @@ def send_request(opts)
raise IgnoreSilentlyException
end
- if !(response.is_a? Net::HTTPSuccess)
+ if response.is_a? Net::HTTPSuccess
+ body = nil
+ if response['content-encoding'] == 'gzip'
+ log.debug "Decompressing return value"
+ i = Zlib::GzipReader.new(StringIO.new(response.body))
+ body = i.read
+ else
+ log.debug "Uncompressed content returned"
+ body = response.body
+ end
+ return_value = Marshal.load(body)
+ if return_value.is_a? Exception
+ raise return_value
+ else
+ return return_value
+ end
+ else
log.debug "Unexpected response from server: #{response.code}: #{response.message}"
raise IgnoreSilentlyException
- end
- response
- end
-
- def decompress_response(response)
- if response['content-encoding'] != 'gzip'
- log.debug "Uncompressed content returned"
- return response.body
- end
- log.debug "Decompressing return value"
- i = Zlib::GzipReader.new(StringIO.new(response.body))
- i.read
- end
-
- def check_for_exception(response)
- dump = decompress_response(response)
- value = Marshal.load(dump)
- raise value if value.is_a? Exception
- value
- end
-
- def remote_method_uri(method)
- uri = "/agent_listener/#{PROTOCOL_VERSION}/#{control.license_key}/#{method}"
- uri << "?run_id=#{@agent_id}" if @agent_id
- uri
- end
-
- # send a message via post
- def invoke_remote(method, *args)
- #determines whether to zip the data or send plain
- post_data, encoding = compress_data(args)
-
- # this checks to make sure mongrel won't choke on big uploads
- check_post_size(post_data)
-
- response = send_request({:uri => remote_method_uri(method), :encoding => encoding, :collector => collector, :data => post_data})
-
- # raises the right exception if the remote server tells it to die
- return check_for_exception(response)
+ end
rescue ForceDisconnectException => e
log.error "RPM forced this agent to disconnect (#{e.message})\n" \
"Restart this process to resume monitoring via rpm.newrelic.com."
@@ -1,19 +1,7 @@
if defined?(PhusionPassenger)
- NewRelic::Control.instance.log.debug "Installing Passenger event hooks."
-
+ NewRelic::Control.instance.log.debug "Installing Passenger shutdown hook."
PhusionPassenger.on_event(:stopping_worker_process) do
NewRelic::Control.instance.log.info "Passenger stopping this process, shutdown the agent."
NewRelic::Agent.instance.shutdown
end
-
- PhusionPassenger.on_event(:starting_worker_process) do |forked|
- if forked
- # We want to clear the stats from the stats engine in case any carried
- # over into the spawned process.
- NewRelic::Agent.instance.stats_engine.clear_stats
- else
- # We're in conservative spawning mode. We don't need to do anything.
- end
- end
-
end
@@ -34,6 +34,10 @@
alias_method :perform_action, :perform_action_with_newrelic_trace
private :perform_action
+ #add_method_tracer :render_for_file, 'View/#{args[0]}/ForFile/Rendering'
+ #add_method_tracer :render_for_text, 'View/#{newrelic_metric_path}/Text/Rendering'
+ #add_method_tracer :render, 'View/#{newrelic_metric_path}/Rendering'
+
def self.newrelic_write_attr(attr_name, value) # :nodoc:
write_inheritable_attribute(attr_name, value)
end
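
The commented-out lines above name the rendering metrics that could be traced with add_method_tracer. A hedged example of how that tracer API is typically applied to an arbitrary class (the class and metric name here are invented for illustration):

    require 'new_relic/agent/method_tracer'

    class InvoiceRenderer
      include NewRelic::Agent::MethodTracer   # provides add_method_tracer

      def render(invoice)
        # ... build the output ...
      end

      # Records time spent in #render under the given custom metric name.
      add_method_tracer :render, 'Custom/InvoiceRenderer/render'
    end
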
@@ -252,11 +252,9 @@ def end_transaction
Thread::current[:newrelic_transaction_name] = nil
end
- # Empty the stats engine, such as when a new passenger instance starts up.
- def clear_stats
+ def clear_stats # :nodoc: for test code only
@stats_hash.clear
end
-
private
# Call poll on each of the samplers. Remove
@@ -178,6 +178,7 @@ def harvest(previous = nil, slow_threshold = 2.0)
if (@harvest_count % @sampling_rate) == 0
result << @random_sample if @random_sample
+ @random_sample = nil
end
end
@@ -191,8 +192,6 @@ def harvest(previous = nil, slow_threshold = 2.0)
result << previous_slowest
end
end
-
- @random_sample = nil
end
result
end
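
This hunk keeps the random transaction sample only on every Nth harvest (N = @sampling_rate) and now discards it inside that branch. A toy illustration of that cadence with hypothetical names, not the sampler's real internals:

    class RandomSampleGate
      def initialize(sampling_rate)
        @sampling_rate = sampling_rate
        @harvest_count = 0
      end

      # True on every Nth call, mirroring the (@harvest_count % @sampling_rate) == 0 check above.
      def keep_sample?
        @harvest_count += 1
        (@harvest_count % @sampling_rate) == 0
      end
    end

    gate = RandomSampleGate.new(10)
    (1..20).count { gate.keep_sample? }   # => 2 (harvests 10 and 20)
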
@@ -75,7 +75,7 @@ def gather_environment_info
append_environment_value('Ruby version'){ RUBY_VERSION }
append_environment_value('Ruby platform') { RUBY_PLATFORM }
append_environment_value('Ruby patchlevel') { RUBY_PATCHLEVEL }
- if defined? ::JRUBY_VERSION
+ if defined? Java
append_environment_value('JRuby version') { JRUBY_VERSION }
append_environment_value('Java VM version') { ENV_JAVA['java.vm.version']}
end
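
This hunk changes the JRuby check from defined? ::JRUBY_VERSION to defined? Java before reporting JRuby and JVM details. A short sketch of what those values look like when the code runs under JRuby (assumes a JRuby interpreter; output comments are illustrative):

    if defined?(JRUBY_VERSION)
      puts "JRuby version: #{JRUBY_VERSION}"     # JRuby's own version string
      puts "Platform:      #{RUBY_PLATFORM}"     # "java" under JRuby
      # ENV_JAVA exposes Java system properties to Ruby code
      puts "JVM version:   #{ENV_JAVA['java.vm.version']}" if defined?(ENV_JAVA)
    end
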
@@ -160,7 +160,7 @@ def discover_framework
end
def check_for_glassfish
- return unless defined?(::Java) &&
+ return unless defined?(Java) &&
(((com.sun.grizzly.jruby.rack.DefaultRackApplicationFactory rescue nil) &&
defined?(com::sun::grizzly::jruby::rack::DefaultRackApplicationFactory)) ||
((org.jruby.rack.DefaultRackApplicationFactory rescue nil) &&
@@ -20,10 +20,6 @@ def hash
h
end
- def to_s
- "#{name}:#{scope}"
- end
-
def to_json(*a)
{'name' => name,
'scope' => scope}.to_json(*a)
@@ -1,7 +1,7 @@
# run unit tests for the NewRelic Agent
if defined? Rake::TestTask
namespace :test do
- AGENT_HOME = File.join(File.dirname(__FILE__), "..","..")
+ AGENT_HOME = File.expand_path(File.join(File.dirname(__FILE__), "..",".."))
Rake::TestTask.new(:newrelic) do |t|
t.libs << "#{AGENT_HOME}/test"
t.libs << "#{AGENT_HOME}/lib"
@@ -45,9 +45,9 @@ def test_with_delimiter
private
def assert_clean(backtrace, rails=false)
if !rails
- assert_equal 0, backtrace.grep('/rails/').size, backtrace.join("\n")
+ assert_equal 0, backtrace.grep('/rails/').size, backtrace.grep(/newrelic_rpm/)
end
- assert_equal 0, backtrace.grep(/trace/).size, backtrace.join("\n")
- assert_equal 0, backtrace.grep(/newrelic_rpm\/agent/).size, backtrace.join("\n")
+ assert_equal 0, backtrace.grep(/trace/).size, backtrace.grep(/trace/)
+ assert_equal 0, backtrace.grep(/newrelic_rpm\/lib/).size, backtrace.grep(/newrelic_rpm\/lib/)
end
end
