Skip to content

Commit

Permalink
Merge pull request rails#25344 from matthewd/debug-locks
Browse files Browse the repository at this point in the history
ActionDispatch::DebugLocks
  • Loading branch information
matthewd committed Jul 1, 2016
1 parent 0e797ec commit b8b5e63
Show file tree
Hide file tree
Showing 4 changed files with 172 additions and 5 deletions.
1 change: 1 addition & 0 deletions actionpack/lib/action_dispatch.rb
Expand Up @@ -50,6 +50,7 @@ class IllegalStateError < StandardError
autoload :Callbacks
autoload :Cookies
autoload :DebugExceptions
autoload :DebugLocks
autoload :ExceptionWrapper
autoload :Executor
autoload :Flash
Expand Down
122 changes: 122 additions & 0 deletions actionpack/lib/action_dispatch/middleware/debug_locks.rb
@@ -0,0 +1,122 @@
module ActionDispatch
  # This middleware can be used to diagnose deadlocks in the autoload interlock.
  #
  # To use it, insert it near the top of the middleware stack, using
  # <tt>config/application.rb</tt>:
  #
  #     config.middleware.insert_before Rack::Sendfile, ActionDispatch::DebugLocks
  #
  # After restarting the application and re-triggering the deadlock condition,
  # <tt>/rails/locks</tt> will show a summary of all threads currently known to
  # the interlock, which lock level they are holding or awaiting, and their
  # current backtrace.
  #
  # Generally a deadlock will be caused by the interlock conflicting with some
  # other external lock or blocking I/O call. These cannot be automatically
  # identified, but should be visible in the displayed backtraces.
  #
  # NOTE: The formatting and content of this middleware's output is intended for
  # human consumption, and should be expected to change between releases.
  #
  # This middleware exposes operational details of the server, with no access
  # control. It should only be enabled when in use, and removed thereafter.
  class DebugLocks
    def initialize(app, path = '/rails/locks')
      @app = app
      @path = path
    end

    # Intercepts GET requests for the configured path and renders the
    # interlock diagnostic report; all other requests pass through.
    def call(env)
      req = ActionDispatch::Request.new env

      if req.get?
        path = req.path_info.chomp('/'.freeze)
        if path == @path
          return render_details(req)
        end
      end

      @app.call(env)
    end

    private
      # Builds the plain-text report of every thread known to the interlock.
      def render_details(req)
        threads = ActiveSupport::Dependencies.interlock.raw_state do |threads|
          # The Interlock itself comes to a complete halt as long as this block
          # is executing. That gives us a more consistent picture of everything,
          # but creates a pretty strong Observer Effect.
          #
          # Most directly, that means we need to do as little as possible in
          # this block. More widely, it means this middleware should remain a
          # strictly diagnostic tool (to be used when something has gone wrong),
          # and not for any sort of general monitoring.

          threads.each.with_index do |(thread, info), idx|
            info[:index] = idx
            info[:backtrace] = thread.backtrace
          end

          threads
        end

        str = threads.map do |thread, info|
          if info[:exclusive]
            lock_state = 'Exclusive'
          elsif info[:sharing] > 0
            lock_state = 'Sharing'
            lock_state << " x#{info[:sharing]}" if info[:sharing] > 1
          else
            lock_state = 'No lock'
          end

          if info[:waiting]
            lock_state << ' (yielded share)'
          end

          msg = "Thread #{info[:index]} [0x#{thread.__id__.to_s(16)} #{thread.status || 'dead'}] #{lock_state}\n"

          if info[:sleeper]
            msg << "  Waiting in #{info[:sleeper]}"
            msg << " to #{info[:purpose].to_s.inspect}" unless info[:purpose].nil?
            msg << "\n"

            if info[:compatible]
              compat = info[:compatible].map { |c| c == false ? "share" : c.to_s.inspect }
              msg << "  may be pre-empted for: #{compat.join(', ')}\n"
            end

            blockers = threads.values.select { |binfo| blocked_by?(info, binfo, threads.values) }
            msg << "  blocked by: #{blockers.map {|i| i[:index] }.join(', ')}\n" if blockers.any?
          end

          blockees = threads.values.select { |binfo| blocked_by?(binfo, info, threads.values) }
          msg << "  blocking: #{blockees.map {|i| i[:index] }.join(', ')}\n" if blockees.any?

          msg << "\n#{info[:backtrace].join("\n")}\n" if info[:backtrace]

          # Return the description unconditionally. Previously the +if+
          # modifier above was the block's last expression, so a thread
          # without a backtrace made the block return +nil+ and the whole
          # entry was silently dropped from the report.
          msg
        end.join("\n\n---\n\n\n")

        # Rack requires header values to be Strings, and Content-Length must
        # be the byte length (backtraces may contain multibyte characters).
        [200, { "Content-Type" => "text/plain", "Content-Length" => str.bytesize.to_s }, [str]]
      end

      # True if +victim+ (sleeping in the interlock) cannot proceed until
      # +blocker+ releases or changes its lock. The rules mirror the wait
      # conditions in ActiveSupport::Concurrency::ShareLock, keyed by which
      # +wait_for+ call-site the victim is parked in.
      def blocked_by?(victim, blocker, all_threads)
        return false if victim.equal?(blocker)

        case victim[:sleeper]
        when :start_sharing
          blocker[:exclusive] ||
            (!victim[:waiting] && blocker[:compatible] && !blocker[:compatible].include?(false))
        when :start_exclusive
          blocker[:sharing] > 0 ||
            blocker[:exclusive] ||
            (blocker[:compatible] && !blocker[:compatible].include?(victim[:purpose]))
        when :yield_shares
          blocker[:exclusive]
        when :stop_exclusive
          blocker[:exclusive] ||
            victim[:compatible] &&
            victim[:compatible].include?(blocker[:purpose]) &&
            all_threads.all? { |other| !other[:compatible] || blocker.equal?(other) || other[:compatible].include?(blocker[:purpose]) }
        end
      end
  end
end
50 changes: 45 additions & 5 deletions activesupport/lib/active_support/concurrency/share_lock.rb
Expand Up @@ -14,13 +14,46 @@ class ShareLock
# to upgrade share locks to exclusive.


# Takes a consistent snapshot of every thread the lock knows about —
# sleepers, sharers, waiters, and the exclusive holder — and yields a
# Hash mapping each Thread to its state, all while holding the monitor.
def raw_state # :nodoc:
  synchronize do
    known = @sleeping.keys | @sharing.keys | @waiting.keys
    known |= [@exclusive_thread] if @exclusive_thread

    snapshot = known.each_with_object({}) do |thread, acc|
      wait_purpose, wait_compatible = @waiting[thread]

      acc[thread] = {
        thread: thread,
        sharing: @sharing[thread],
        exclusive: thread == @exclusive_thread,
        purpose: wait_purpose,
        compatible: wait_compatible,
        waiting: !!@waiting[thread],
        sleeper: @sleeping[thread],
      }
    end

    # NB: the caller's block runs while we still hold the *internal*
    # synchronize lock, which normally guards only a few instructions at
    # a time. That keeps the snapshot from shifting underneath the caller,
    # but would be disastrous in normal operation — this method exists
    # purely for diagnosing a lock that has already gone wrong.
    yield snapshot
  end
end

def initialize
super()

@cv = new_cond

@sharing = Hash.new(0)
@waiting = {}
@sleeping = {}
@exclusive_thread = nil
@exclusive_depth = 0
end
Expand All @@ -46,7 +79,7 @@ def start_exclusive(purpose: nil, compatible: [], no_wait: false)
return false if no_wait

yield_shares(purpose: purpose, compatible: compatible, block_share: true) do
@cv.wait_while { busy_for_exclusive?(purpose) }
wait_for(:start_exclusive) { busy_for_exclusive?(purpose) }
end
end
@exclusive_thread = Thread.current
Expand All @@ -69,7 +102,7 @@ def stop_exclusive(compatible: [])

if eligible_waiters?(compatible)
yield_shares(compatible: compatible, block_share: true) do
@cv.wait_while { @exclusive_thread || eligible_waiters?(compatible) }
wait_for(:stop_exclusive) { @exclusive_thread || eligible_waiters?(compatible) }
end
end
@cv.broadcast
Expand All @@ -84,11 +117,11 @@ def start_sharing
elsif @waiting[Thread.current]
# We're nested inside a +yield_shares+ call: we'll resume as
# soon as there isn't an exclusive lock in our way
@cv.wait_while { @exclusive_thread }
wait_for(:start_sharing) { @exclusive_thread }
else
# This is an initial / outermost share call: any outstanding
# requests for an exclusive lock get to go first
@cv.wait_while { busy_for_sharing?(false) }
wait_for(:start_sharing) { busy_for_sharing?(false) }
end
@sharing[Thread.current] += 1
end
Expand Down Expand Up @@ -153,7 +186,7 @@ def yield_shares(purpose: nil, compatible: [], block_share: false)
yield
ensure
synchronize do
@cv.wait_while { @exclusive_thread && @exclusive_thread != Thread.current }
wait_for(:yield_shares) { @exclusive_thread && @exclusive_thread != Thread.current }

if previous_wait
@waiting[Thread.current] = previous_wait
Expand Down Expand Up @@ -181,6 +214,13 @@ def busy_for_sharing?(purpose)
# True when at least one thread waiting for an exclusive lock could be
# woken now: its wait purpose must appear in +compatible+, and every
# *other* waiting thread must also list that purpose as compatible.
def eligible_waiters?(compatible)
  @waiting.any? { |t, (p, _)| compatible.include?(p) && @waiting.all? { |t2, (_, c2)| t == t2 || c2.include?(p) } }
end

# Sleeps on the condition variable until the given block turns false,
# recording in @sleeping which call-site (+purpose+) the current thread
# is parked in so diagnostics can report it. The record is removed again
# even if the wait is interrupted by an exception.
def wait_for(purpose)
  sleeper = Thread.current
  @sleeping[sleeper] = purpose
  @cv.wait_while { yield }
ensure
  @sleeping.delete sleeper
end
end
end
end
4 changes: 4 additions & 0 deletions activesupport/lib/active_support/dependencies/interlock.rb
Expand Up @@ -46,6 +46,10 @@ def permit_concurrent_loads
yield
end
end

# Diagnostic-only: forwards to the underlying ShareLock's raw_state,
# yielding its per-thread state snapshot to the given block. The block
# runs while the lock's internal monitor is held — see the caveats on
# ShareLock#raw_state before using this anywhere but debugging.
def raw_state(&block) # :nodoc:
  @lock.raw_state(&block)
end
end
end
end

0 comments on commit b8b5e63

Please sign in to comment.