fix: Get the whole redis slowlog, rather than just 10
jim80net committed Apr 28, 2020
1 parent ba71589 commit bbbfa48
Showing 2 changed files with 101 additions and 51 deletions.
13 changes: 11 additions & 2 deletions lib/slowlog_check.rb
@@ -3,6 +3,7 @@

class SlowlogCheck
  ::LOGGER ||= ::Logger.new($stdout)
+  MAXLENGTH = 1048576 # 2**20; caps #redis_slowlog at 14 doublings of the initial 128-entry request

  def initialize(params = {})
    @ddog = params.fetch(:ddog)
@@ -114,8 +115,16 @@ def add_metric_to_bucket(prior, new)
    }
  end

-  def redis_slowlog
-    @redis.slowlog('get')
+  def did_i_get_it_all?(slowlog)
+    slowlog[-1][0] == 0
  end

+  def redis_slowlog(length=128)
+    resp = @redis.slowlog('get', length)
+
+    return resp if length > MAXLENGTH
+    return resp if did_i_get_it_all?(resp)
+    return redis_slowlog(length * 2)
+  end
+
  def slowlogs_by_flush_interval
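For context: SLOWLOG GET returns only the 10 most recent entries when called without a count, which is why the old redis_slowlog silently truncated the log. The new code requests 128 entries and keeps doubling the request until the response contains entry id 0 (the oldest slowlog entry), bailing out once the requested length passes MAXLENGTH. Below is a minimal standalone sketch of that doubling fetch; FakeRedis and fetch_whole_slowlog are invented here purely for illustration, while the control flow mirrors the diff above.

MAXLENGTH = 1_048_576

# Stand-in for a Redis client: slowlog('get', n) returns the n newest
# entries, each shaped [id, unix_time, microseconds, args, client, name].
class FakeRedis
  def initialize(entries) # entries ordered newest first, ids descending
    @entries = entries
  end

  def slowlog(_subcommand, count)
    @entries[0...count]
  end
end

def fetch_whole_slowlog(redis, length = 128)
  resp = redis.slowlog('get', length)
  return resp if length > MAXLENGTH      # recursion cap
  return resp if resp[-1][0] == 0        # oldest entry reached: we have it all
  fetch_whole_slowlog(redis, length * 2) # otherwise ask for twice as much
end

# 300 fake entries, newest first, ids 299..0: requests go 128, 256, 512.
log = Array.new(300) { |i| [299 - i, 1587352800 + i, i, ['eval', '', '0'], '192.0.2.40:55700', ''] }
puts fetch_whole_slowlog(FakeRedis.new(log)).length # => 300

Like the original, the sketch assumes a non-empty slowlog; resp[-1] would be nil on an empty response.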
139 changes: 90 additions & 49 deletions spec/slowlog_check_spec.rb
@@ -17,60 +17,31 @@
  let(:frozen_time) { Time.utc(2020,4,20,4,20,45) }
  let(:ddog_time) { Time.utc(2020,4,20,4,16).to_i * 1000.0 }

+  def redis_slowlog(index, time, microseconds)
+    [
+      index,
+      time.to_i,
+      microseconds,
+      [
+        "eval",
+        "",
+        "0"
+      ],
+      "192.0.2.40:55700",
+      ""
+    ]
+  end
+
  before(:example) do
    ##
    # redis mock
    allow(redis).to receive(:connection) { {host: 'master.replicationgroup.abcde.use2.cache.amazonaws.com' } }
-    allow(redis).to receive(:slowlog).with('get') {
+    allow(redis).to receive(:slowlog).with('get', 128) {
      [
-        [
-          3,
-          Time.utc(2020,04,20,04,19,45).to_i,
-          400000,
-          [
-            "eval",
-            "",
-            "0"
-          ],
-          "192.0.2.40:55700",
-          ""
-        ],
-        [
-          2,
-          Time.utc(2020,04,20,04,19,15).to_i,
-          100000,
-          [
-            "eval",
-            "",
-            "0"
-          ],
-          "192.0.2.40:55700",
-          ""
-        ],
-        [
-          1,
-          Time.utc(2020,04,20,04,18,45).to_i,
-          100000,
-          [
-            "eval",
-            "",
-            "0"
-          ],
-          "192.0.2.40:55700",
-          ""
-        ],
-        [
-          0,
-          Time.utc(2020,04,20,04,18,15).to_i,
-          200000,
-          [
-            "eval",
-            "",
-            "0"
-          ],
-          "192.0.2.40:55700",
-          ""
-        ]
+        redis_slowlog( 3, Time.utc(2020,04,20,04,19,45), 400000 ),
+        redis_slowlog( 2, Time.utc(2020,04,20,04,19,15), 100000 ),
+        redis_slowlog( 1, Time.utc(2020,04,20,04,18,45), 100000 ),
+        redis_slowlog( 0, Time.utc(2020,04,20,04,18,15), 200000 ),
      ]
    }

@@ -120,6 +91,76 @@
    allow_any_instance_of(Logger).to receive(:info) {}
  end

+  describe '#redis_slowlog.length' do
+    context 'redis has 4 entries' do
+      before(:each) do
+        allow(redis).to receive(:slowlog).with('get', 128) {
+          [
+            redis_slowlog( 3, Time.utc(2020,04,20,04,19,45), 400000 ),
+            redis_slowlog( 2, Time.utc(2020,04,20,04,19,15), 100000 ),
+            redis_slowlog( 1, Time.utc(2020,04,20,04,18,45), 100000 ),
+            redis_slowlog( 0, Time.utc(2020,04,20,04,18,15), 200000 ),
+          ]
+        }
+      end
+
+      subject { slowlog_check.redis_slowlog.length }
+      it { is_expected.to eq(4) }
+    end
+
+    context 'redis has 129 entries and a zeroeth entry' do
+      before(:each) do
+        allow(redis).to receive(:slowlog).with('get', 128) {
+          Array.new(129) { |x|
+            redis_slowlog(x, Time.utc(2020,04,20,04,00,00) + x, x * 1000)
+          }.reverse[0..127]
+        }
+
+        allow(redis).to receive(:slowlog).with('get', 256) {
+          Array.new(129) { |x|
+            redis_slowlog(x, Time.utc(2020,04,20,04,00,00) + x, x * 1000)
+          }.reverse
+        }
+
+      end
+
+      subject { slowlog_check.redis_slowlog.length }
+      it { is_expected.to eq(129) }
+    end
+
+    context 'redis has 1048576 * 2 + 1 entries and a zeroeth entry' do
+      let(:sauce) {
+        Array.new(1048576 * 2 + 1) { |x|
+          redis_slowlog(x, 1587352800, x) # lettuce not create so many unnecessary Time objects
+        }.reverse
+      }
+      before(:each) do
+        allow(redis).to receive(:slowlog) { |_,number|
+          sauce[0..number-1]
+        }
+      end
+
+      subject { slowlog_check.redis_slowlog.length }
+      it { is_expected.to eq(1048576 * 2) } # the oldest (zeroeth) entry falls off once the MAXLENGTH cap triggers
+    end
+
+    context 'redis has 567 entries and no zeroeth entry' do
+      let(:sauce) {
+        Array.new(567) { |x|
+          redis_slowlog(x + 1, Time.utc(2020,04,20,03,20,00) + x, x) # ids start at 1, so id 0 never appears
+        }.reverse
+      }
+      before(:each) do
+        allow(redis).to receive(:slowlog) { |_,number|
+          sauce[0..number-1]
+        }
+      end
+
+      subject { slowlog_check.redis_slowlog.length }
+      it { is_expected.to eq(567) }
+
+    end
+  end

  describe '#replication_group' do
    subject { slowlog_check.replication_group }
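The last two contexts above stub Redis#slowlog with a block rather than a fixed return value; rspec-mocks passes each call's arguments into that block, so a single stub can answer the 'get', 128 / 'get', 256 / ... requests made at every recursion depth by slicing one prebuilt array. A minimal self-contained sketch of the technique, assuming a plain test double (the example group name and 300-entry fixture are invented for illustration; sauce mirrors the spec's own naming):

require 'rspec'

RSpec.describe 'variable-length slowlog stub' do
  let(:redis) { double('redis') }
  let(:sauce) do
    # 300 fake entries, newest first, ids 299..0
    Array.new(300) { |x| [x, 1587352800 + x, x * 1000, ['eval', '', '0'], '192.0.2.40:55700', ''] }.reverse
  end

  it 'returns only as many entries as requested' do
    # The block receives the call's arguments, so one stub serves
    # every count the code under test asks for.
    allow(redis).to receive(:slowlog) { |_subcommand, number| sauce[0..number - 1] }

    expect(redis.slowlog('get', 128).length).to eq(128)
    expect(redis.slowlog('get', 512).length).to eq(300)
  end
end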
