DEBUG-2657 remove benchmark dogstatsd reporting & run_forever (#3827)
* remove benchmark dogstatsd reporting

* remove benchmark run_forever

---------

Co-authored-by: Oleg Pudeyev <code@olegp.name>
p-datadog and p authored Aug 6, 2024
1 parent 8dfe622 commit 0ba87b9
Showing 8 changed files with 4 additions and 161 deletions.
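
With the dogstatsd reporter and the --forever escape hatch gone, every benchmark now configures benchmark-ips with just its timing options and always calls run_benchmark. Below is a minimal sketch of that resulting shape, not a copy of any one file: the class name, the report, and the exact derivation of VALIDATE_BENCHMARK_MODE from the environment are illustrative assumptions.

require 'benchmark/ips'

# Assumption: the validate flag is derived from an environment variable roughly like this.
VALIDATE_BENCHMARK_MODE = ENV['VALIDATE_BENCHMARK'] == 'true'

# Hypothetical benchmark class illustrating the post-commit pattern.
class ExampleBenchmark
  def run_benchmark
    Benchmark.ips do |x|
      # Tiny timings when only validating that the benchmark still runs (e.g. in CI).
      benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
      x.config(**benchmark_time) # no suite: dogstatsd reporter any more

      x.report('Allocations (baseline)', 'BasicObject.new')
      x.compare!
    end
  end
end

puts "Current pid is #{Process.pid}"

ExampleBenchmark.new.instance_exec { run_benchmark } # no --forever branch any more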
83 changes: 0 additions & 83 deletions benchmarks/dogstatsd_reporter.rb

This file was deleted.

3 changes: 0 additions & 3 deletions benchmarks/profiler_allocation.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 # This benchmark measures the performance of allocation profiling

@@ -25,7 +24,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_allocation')
     )

     x.report('Allocations (baseline)', 'BasicObject.new')
@@ -48,7 +46,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_allocation')
     )

     x.report("Allocations (#{ENV['CONFIG']})", 'BasicObject.new')
7 changes: 0 additions & 7 deletions benchmarks/profiler_gc.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 # This benchmark measures the performance of GC profiling

@@ -34,7 +33,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc')
     )

     # The idea of this benchmark is to test the overall cost of the Ruby VM calling these methods on every GC.
@@ -53,7 +51,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc_minute')
     )

     # We cap the number of minor GC samples to not happen more often than TIME_BETWEEN_GC_EVENTS_NS (10)
@@ -80,7 +77,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc_integration')
     )

     x.report('Major GC runs (profiling disabled)', 'GC.start')
@@ -100,7 +96,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc_integration')
     )

     x.report('Major GC runs (profiling enabled)', 'GC.start')
@@ -115,7 +110,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc_integration_allocations')
     )

     x.report('Allocations (profiling disabled)', 'Object.new')
@@ -135,7 +129,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_gc_integration_allocations')
     )

     x.report('Allocations (profiling enabled)', 'Object.new')
2 changes: 0 additions & 2 deletions benchmarks/profiler_hold_resume_interruptions.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 # This benchmark measures the performance of the hold/resume interruptions used by the DirMonkeyPatches
 class ProfilerHoldResumeInterruptions
@@ -22,7 +21,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_hold_resume_interruptions')
     )

     x.report("hold / resume") do
15 changes: 1 addition & 14 deletions benchmarks/profiler_http_transport.rb
@@ -8,7 +8,6 @@
 require 'pry'
 require 'securerandom'
 require 'socket'
-require_relative 'dogstatsd_reporter'

 # This benchmark measures the performance of the http_transport class used for reporting profiling data
 #
@@ -81,7 +80,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 70, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_http_transport')
     )

     x.report("http_transport #{ENV['CONFIG']}") do
@@ -93,13 +91,6 @@
     end
   end

-  def run_forever
-    while true
-      100.times { run_once }
-      print '.'
-    end
-  end
-
   def run_once
     success = @transport.export(@flush)

@@ -110,9 +101,5 @@ def run_once
 puts "Current pid is #{Process.pid}"

 ProfilerHttpTransportBenchmark.new.instance_exec do
-  if ARGV.include?('--forever')
-    run_forever
-  else
-    run_benchmark
-  end
+  run_benchmark
 end
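
For anyone who still relies on the endless-loop workflow that run_forever provided (for example to attach a profiler or watch memory over time), the removed behaviour is easy to approximate by hand. A hedged local sketch, reusing run_once from the diff above and assuming, as the original --forever path did, that the instance is ready to export right after it is created; this loop is no longer part of the repository:

# Hypothetical local snippet; requires the benchmark file's definitions to be loaded first.
ProfilerHttpTransportBenchmark.new.instance_exec do
  loop do
    100.times { run_once } # run_once exports one profile via @transport.export(@flush)
    print '.'              # progress marker, mirroring the removed run_forever
  end
end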
19 changes: 1 addition & 18 deletions benchmarks/profiler_memory_sample_serialize.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 require 'libdatadog'

@@ -66,7 +65,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 30, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_memory_sample_serialize')
     )

     x.report("sample+serialize #{ENV['CONFIG']} retain_every=#{@retain_every} heap_samples=#{@heap_samples_enabled} heap_size=#{@heap_size_enabled} heap_sample_every=#{@heap_sample_every} skip_end_gc=#{@skip_end_gc}") do
@@ -89,26 +87,11 @@ def run_benchmark
       x.compare!
     end
   end
-
-  def run_forever
-    loop do
-      recorder = @recorder_factory.call
-      1000.times do |i|
-        sample_object(recorder, i % 400)
-      end
-      recorder.serialize
-      print '.'
-    end
-  end
 end

 puts "Current pid is #{Process.pid}"

 ProfilerMemorySampleSerializeBenchmark.new.instance_exec do
   setup
-  if ARGV.include?('--forever')
-    run_forever
-  else
-    run_benchmark
-  end
+  run_benchmark
 end
18 changes: 1 addition & 17 deletions benchmarks/profiler_sample_loop_v2.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 # This benchmark measures the performance of the main stack sampling loop of the profiler

@@ -46,7 +45,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_sample_loop_v2')
     )

     x.report("stack collector #{ENV['CONFIG']}") do
@@ -59,26 +57,12 @@ def run_benchmark

     @recorder.serialize
   end
-
-  def run_forever
-    while true
-      1000.times do
-        Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample(@collector, PROFILER_OVERHEAD_STACK_THREAD)
-      end
-      @recorder.serialize
-      print '.'
-    end
-  end
 end

 puts "Current pid is #{Process.pid}"

 ProfilerSampleLoopBenchmark.new.instance_exec do
   create_profiler
   4.times { thread_with_very_deep_stack }
-  if ARGV.include?('--forever')
-    run_forever
-  else
-    run_benchmark
-  end
+  run_benchmark
 end
18 changes: 1 addition & 17 deletions benchmarks/profiler_sample_serialize.rb
@@ -6,7 +6,6 @@
 require 'benchmark/ips'
 require 'datadog'
 require 'pry'
-require_relative 'dogstatsd_reporter'

 require 'libdatadog'

@@ -40,7 +39,6 @@ def run_benchmark
     benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.01, warmup: 0 } : { time: 10, warmup: 2 }
     x.config(
       **benchmark_time,
-      suite: report_to_dogstatsd_if_enabled_via_environment_variable(benchmark_name: 'profiler_sample_serialize')
     )

     x.report("sample #{ENV['CONFIG']} timeline=#{ENV['TIMELINE'] == 'true'}") do
@@ -61,26 +59,12 @@ def run_benchmark

     @recorder.serialize
   end
-
-  def run_forever
-    while true
-      1000.times do
-        Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample(@collector, PROFILER_OVERHEAD_STACK_THREAD)
-      end
-      @recorder.serialize
-      print '.'
-    end
-  end
 end

 puts "Current pid is #{Process.pid}"

 ProfilerSampleSerializeBenchmark.new.instance_exec do
   create_profiler
   10.times { Thread.new { sleep } }
-  if ARGV.include?('--forever')
-    run_forever
-  else
-    run_benchmark
-  end
+  run_benchmark
 end
