Updating resque test suite to version 1.22.0

commit b80286c212718f2332eb78d65f2167ad45e6e064 (1 parent: 44a5592)
Authored by @mateusdelbianco
11 Gemfile
@@ -6,3 +6,14 @@ group :development do
gem 'ruby-debug', :platform => :mri_18
gem 'debugger', :platform => :mri_19
end
+
+group :test do
+ gem "rake"
+ gem "rack-test", "~> 0.5"
+ gem "mocha", "~> 0.9.7"
+ gem "yajl-ruby", "~>0.8.2", :platforms => :mri
+ gem "json", "~>1.5.3", :platforms => [:jruby, :rbx]
+ gem "hoptoad_notifier"
+ gem "airbrake"
+ gem "i18n"
+end
27 test/airbrake_test.rb
@@ -0,0 +1,27 @@
+
+require 'test_helper'
+
+begin
+ require 'airbrake'
+rescue LoadError
+ warn "Install airbrake gem to run Airbrake tests."
+end
+
+if defined? Airbrake
+ require 'resque/failure/airbrake'
+ context "Airbrake" do
+ test "should be notified of an error" do
+ exception = StandardError.new("BOOM")
+ worker = Resque::Worker.new(:test)
+ queue = "test"
+ payload = {'class' => Object, 'args' => 66}
+
+ Airbrake.expects(:notify_or_ignore).with(
+ exception,
+ :parameters => {:payload_class => 'Object', :payload_args => '66'})
+
+ backend = Resque::Failure::Airbrake.new(exception, worker, queue, payload)
+ backend.save
+ end
+ end
+end
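The mock expectation above pins down the contract this new test exercises: the Airbrake failure backend should forward the exception plus a small parameters hash to Airbrake.notify_or_ignore. A minimal sketch of a backend satisfying that expectation follows; it is inferred from the test, not copied from resque/failure/airbrake, and the class name AirbrakeSketch is made up to avoid clashing with the real backend.

    # Sketch only -- inferred from the mock expectation in airbrake_test.rb.
    # Assumes the accessors (#exception, #payload) provided by Resque::Failure::Base.
    require 'airbrake'
    require 'resque/failure/base'

    module Resque
      module Failure
        class AirbrakeSketch < Base
          def save
            Airbrake.notify_or_ignore(
              exception,
              :parameters => {
                :payload_class => payload['class'].to_s,
                :payload_args  => payload['args'].to_s
              }
            )
          end
        end
      end
    end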
BIN  test/dump.rdb
Binary file not shown
1  test/hoptoad_test.rb
@@ -7,6 +7,7 @@
end
if defined? HoptoadNotifier
+ require 'resque/failure/hoptoad'
context "Hoptoad" do
test "should be notified of an error" do
exception = StandardError.new("BOOM")
141 test/job_hooks_test.rb
@@ -250,6 +250,106 @@ def self.perform(history)
end
end
+
+context "Resque::Job before_enqueue" do
+ include PerformJob
+
+ class ::BeforeEnqueueJob
+ @queue = :jobs
+ def self.before_enqueue_record_history(history)
+ history << :before_enqueue
+ end
+
+ def self.perform(history)
+ end
+ end
+
+ class ::BeforeEnqueueJobAbort
+ @queue = :jobs
+ def self.before_enqueue_abort(history)
+ false
+ end
+
+ def self.perform(history)
+ end
+ end
+
+ test "the before enqueue hook should run" do
+ history = []
+ @worker = Resque::Worker.new(:jobs)
+ assert Resque.enqueue(BeforeEnqueueJob, history)
+ @worker.work(0)
+ assert_equal history, [:before_enqueue], "before_enqueue was not run"
+ end
+
+ test "a before enqueue hook that returns false should prevent the job from getting queued" do
+ history = []
+ @worker = Resque::Worker.new(:jobs)
+ assert_nil Resque.enqueue(BeforeEnqueueJobAbort, history)
+ assert_equal 0, Resque.size(:jobs)
+ end
+end
+
+context "Resque::Job after_dequeue" do
+ include PerformJob
+
+ class ::AfterDequeueJob
+ @queue = :jobs
+ def self.after_dequeue_record_history(history)
+ history << :after_dequeue
+ end
+
+ def self.perform(history)
+ end
+ end
+
+ test "the after dequeue hook should run" do
+ history = []
+ @worker = Resque::Worker.new(:jobs)
+ Resque.dequeue(AfterDequeueJob, history)
+ @worker.work(0)
+ assert_equal history, [:after_dequeue], "after_dequeue was not run"
+ end
+end
+
+
+context "Resque::Job before_dequeue" do
+ include PerformJob
+
+ class ::BeforeDequeueJob
+ @queue = :jobs
+ def self.before_dequeue_record_history(history)
+ history << :before_dequeue
+ end
+
+ def self.perform(history)
+ end
+ end
+
+ class ::BeforeDequeueJobAbort
+ @queue = :jobs
+ def self.before_dequeue_abort(history)
+ false
+ end
+
+ def self.perform(history)
+ end
+ end
+
+ test "the before dequeue hook should run" do
+ history = []
+ @worker = Resque::Worker.new(:jobs)
+ Resque.dequeue(BeforeDequeueJob, history)
+ @worker.work(0)
+ assert_equal history, [:before_dequeue], "before_dequeue was not run"
+ end
+
+ test "a before dequeue hook that returns false should prevent the job from getting dequeued" do
+ history = []
+ assert_equal nil, Resque.dequeue(BeforeDequeueJobAbort, history)
+ end
+end
+
context "Resque::Job all hooks" do
include PerformJob
@@ -320,4 +420,45 @@ def self.on_failure_record_history(exception, history)
"oh no"
]
end
+
+ class ::CallbacksInline
+ @queue = :callbacks_inline
+
+ def self.before_perform_record_history(history, count)
+ history << :before_perform
+ count['count'] += 1
+ end
+
+ def self.after_perform_record_history(history, count)
+ history << :after_perform
+ count['count'] += 1
+ end
+
+ def self.around_perform_record_history(history, count)
+ history << :start_around_perform
+ count['count'] += 1
+ yield
+ history << :finish_around_perform
+ count['count'] += 1
+ end
+
+ def self.perform(history, count)
+ history << :perform
+ $history = history
+ $count = count
+ end
+ end
+
+ test "it runs callbacks when inline is true" do
+ begin
+ Resque.inline = true
+ # Sending down two parameters that can be passed and updated by reference
+ result = Resque.enqueue(CallbacksInline, [], {'count' => 0})
+ assert_equal true, result, "perform returned true"
+ assert_equal $history, [:before_perform, :start_around_perform, :perform, :finish_around_perform, :after_perform]
+ assert_equal 4, $count['count']
+ ensure
+ Resque.inline = false
+ end
+ end
end
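The new hook tests rely on Resque's naming convention: class methods whose names start with before_enqueue, before_dequeue, after_dequeue, before_perform, around_perform, after_perform, or on_failure are treated as hooks, and a before_enqueue/before_dequeue hook that returns false aborts the operation (hence the assert_nil on Resque.enqueue above). A purely illustrative, hypothetical helper showing the prefix-based lookup the tests depend on; Resque's real lookup lives in the library and may differ in detail:

    # Hypothetical illustration of the hook-by-method-prefix convention.
    def hooks_for(job_class, prefix)
      job_class.methods.map { |m| m.to_s }.select { |name| name.start_with?(prefix) }.sort
    end

    hooks_for(BeforeEnqueueJob, 'before_enqueue')  # => ["before_enqueue_record_history"]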
115 test/redis-test-cluster.conf
@@ -0,0 +1,115 @@
+# Redis configuration file example
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
+# You can specify a custom pid file location here.
+pidfile ./test/redis-test-cluster.pid
+
+# Accept connections on the specified port, default is 6379
+port 9737
+
+# If you want you can bind a single interface, if the bind option is not
+# specified all the interfaces will listen for connections.
+#
+# bind 127.0.0.1
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 300
+
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+save 900 1
+save 300 10
+save 60 10000
+
+# The filename where to dump the DB
+dbfilename dump-cluster.rdb
+
+# For default save/load DB in/from the working directory
+# Note that you must specify a directory not a file name.
+dir ./test/
+
+# Set server verbosity to 'debug'
+# it can be one of:
+# debug (a lot of information, useful for development/testing)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel debug
+
+# Specify the log file name. Also 'stdout' can be used to force
+# the demon to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile stdout
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. Note that the configuration is local to the slave
+# so for example it is possible to configure the slave to save the DB with a
+# different interval, or to listen to another port, and so on.
+
+# slaveof <masterip> <masterport>
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH <PASSWORD> before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+
+# requirepass foobared
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default there
+# is no limit, and it's up to the number of file descriptors the Redis process
+# is able to open. The special value '0' means no limits.
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+
+# maxclients 128
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys with an
+# EXPIRE set. It will try to start freeing keys that are going to expire
+# in little time and preserve keys with a longer time to live.
+# Redis will also try to remove objects from free lists if possible.
+#
+# If all this fails, Redis will start to reply with errors to commands
+# that will use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to most read-only commands like GET.
+#
+# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+# 'state' server or cache, not as a real DB. When Redis is used as a real
+# database the memory usage will grow over the weeks, it will be obvious if
+# it is going to use too much memory in the long run, and you'll have the time
+# to upgrade. With maxmemory after the limit is reached you'll start to get
+# errors for write operations, and this may even lead to DB inconsistency.
+
+# maxmemory <bytes>
+
+############################### ADVANCED CONFIG ###############################
+
+# Glue small output buffers together in order to send small replies in a
+# single TCP packet. Uses a bit more CPU but most of the time it is a win
+# in terms of number of queries per second. Use 'yes' if unsure.
+glueoutputbuf yes
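This second config exists for the RESQUE_DISTRIBUTED mode added to test_helper.rb further down: it mirrors the primary test server but listens on port 9737 and dumps to dump-cluster.rdb, so the suite can exercise Redis::Distributed across two local instances. Condensed from the RESQUE_DISTRIBUTED branch of the test_helper.rb diff below, the two servers are wired together like this:

    require 'redis/distributed'
    require 'redis/namespace'
    require 'resque'

    # Point Resque at both local test servers through Redis::Distributed.
    r = Redis::Distributed.new(['redis://localhost:9736', 'redis://localhost:9737'])
    Resque.redis = Redis::Namespace.new(:resque, :redis => r)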
6 test/resque-web_test.rb
@@ -51,3 +51,9 @@
should_respond_with_success
end
+
+context "also works with slash at the end" do
+ setup { get "/working/" }
+
+ should_respond_with_success
+end
23 test/resque_failure_redis_test.rb
@@ -0,0 +1,23 @@
+require 'test_helper'
+require 'resque/failure/redis'
+
+context "Resque::Failure::Redis" do
+ setup do
+ @bad_string = [39, 250, 141, 168, 138, 191, 52, 211, 159, 86, 93, 95, 39].map { |c| c.chr }.join
+ exception = StandardError.exception(@bad_string)
+ worker = Resque::Worker.new(:test)
+ queue = "queue"
+ payload = { "class" => Object, "args" => 3 }
+ @redis_backend = Resque::Failure::Redis.new(exception, worker, queue, payload)
+ end
+
+ test 'cleans up bad strings before saving the failure, in order to prevent errors on the resque UI' do
+ # test assumption: the bad string should not be able to round trip through JSON
+ assert_raises(MultiJson::DecodeError) {
+ MultiJson.decode(MultiJson.encode(@bad_string))
+ }
+
+ @redis_backend.save
+ Resque::Failure::Redis.all # should not raise an error
+ end
+end
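The test above only asserts behaviour: a byte sequence that cannot round-trip through MultiJson must not break Failure::Redis.all. One way such sanitisation can be done, shown purely as a hypothetical illustration (this is not necessarily what resque/failure/redis does internally):

    # Hypothetical illustration only -- not Resque's actual cleanup code.
    # Replace bytes that are invalid or undefined in UTF-8 so the value can
    # round trip through JSON without raising (Ruby 1.9+ encoding API).
    def sanitize_for_json(str)
      str.dup.force_encoding('BINARY')
         .encode('UTF-8', :invalid => :replace, :undef => :replace, :replace => '')
    end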
23 test/resque_test.rb
@@ -7,6 +7,11 @@
Resque.push(:people, { 'name' => 'chris' })
Resque.push(:people, { 'name' => 'bob' })
Resque.push(:people, { 'name' => 'mark' })
+ @original_redis = Resque.redis
+ end
+
+ teardown do
+ Resque.redis = @original_redis
end
test "can set a namespace through a url-like string" do
@@ -131,6 +136,16 @@
assert_equal nil, Resque.reserve(:method)
end
+ test "can define a queue for jobs by way of a method" do
+ assert_equal 0, Resque.size(:method)
+ assert Resque.enqueue_to(:new_queue, SomeMethodJob, 20, '/tmp')
+
+ job = Resque.reserve(:new_queue)
+ assert_equal SomeMethodJob, job.payload_class
+ assert_equal 20, job.args[0]
+ assert_equal '/tmp', job.args[1]
+ end
+
test "needs to infer a queue with enqueue" do
assert_raises Resque::NoQueueError do
Resque.enqueue(SomeJob, 20, '/tmp')
@@ -201,7 +216,7 @@
end
test "keeps track of resque keys" do
- assert_equal ["queue:people", "queues"], Resque.keys
+ assert_equal ["queue:people", "queues"].sort, Resque.keys.sort
end
test "badly wants a class name, too" do
@@ -238,7 +253,11 @@
assert_equal 3, stats[:queues]
assert_equal 3, stats[:processed]
assert_equal 1, stats[:failed]
- assert_equal [Resque.redis.respond_to?(:server) ? 'localhost:9736' : 'redis://localhost:9736/0'], stats[:servers]
+ if ENV.key? 'RESQUE_DISTRIBUTED'
+ assert_equal [Resque.redis.respond_to?(:server) ? 'localhost:9736, localhost:9737' : 'redis://localhost:9736/0, redis://localhost:9737/0'], stats[:servers]
+ else
+ assert_equal [Resque.redis.respond_to?(:server) ? 'localhost:9736' : 'redis://localhost:9736/0'], stats[:servers]
+ end
end
test "decode bad json" do
56 test/test_helper.rb
@@ -1,13 +1,13 @@
require 'rubygems'
-require 'bundler'
-Bundler.setup(:default, :test)
-Bundler.require(:default, :test)
-dir = File.dirname(File.expand_path(__FILE__))
-$LOAD_PATH.unshift dir + '/../lib'
+$dir = File.dirname(File.expand_path(__FILE__))
+$LOAD_PATH.unshift $dir + '/../lib'
$TESTING = true
require 'test/unit'
+require 'redis/namespace'
+require 'resque'
+
begin
require 'leftright'
rescue LoadError
@@ -39,16 +39,26 @@
exit_code = Test::Unit::AutoRunner.run
end
- pid = `ps -A -o pid,command | grep [r]edis-test`.split(" ")[0]
+ processes = `ps -A -o pid,command | grep [r]edis-test`.split("\n")
+ pids = processes.map { |process| process.split(" ")[0] }
puts "Killing test redis server..."
- `rm -f #{dir}/dump.rdb`
- Process.kill("KILL", pid.to_i)
+ pids.each { |pid| Process.kill("TERM", pid.to_i) }
+ system("rm -f #{$dir}/dump.rdb #{$dir}/dump-cluster.rdb")
exit exit_code
end
-puts "Starting redis for testing at localhost:9736..."
-`redis-server #{dir}/redis-test.conf`
-Resque.redis = 'localhost:9736'
+if ENV.key? 'RESQUE_DISTRIBUTED'
+ require 'redis/distributed'
+ puts "Starting redis for testing at localhost:9736 and localhost:9737..."
+ `redis-server #{$dir}/redis-test.conf`
+ `redis-server #{$dir}/redis-test-cluster.conf`
+ r = Redis::Distributed.new(['redis://localhost:9736', 'redis://localhost:9737'])
+ Resque.redis = Redis::Namespace.new :resque, :redis => r
+else
+ puts "Starting redis for testing at localhost:9736..."
+ `redis-server #{$dir}/redis-test.conf`
+ Resque.redis = 'localhost:9736'
+end
##
@@ -134,15 +144,31 @@ def with_failure_backend(failure_backend, &block)
Resque::Failure.backend = previous_backend
end
+require 'time'
+
class Time
# Thanks, Timecop
class << self
+ attr_accessor :fake_time
+
alias_method :now_without_mock_time, :now
- def now_with_mock_time
- $fake_time || now_without_mock_time
+ def now
+ fake_time || now_without_mock_time
end
-
- alias_method :now, :now_with_mock_time
end
+
+ self.fake_time = nil
+end
+
+def capture_stderr
+ # The output stream must be an IO-like object. In this case we capture it in
+ # an in-memory IO object so we can return the string value. You can assign any
+ # IO object here.
+ previous_stderr, $stderr = $stderr, StringIO.new
+ yield
+ $stderr.string
+ensure
+ # Restore the previous value of stderr (typically equal to STDERR).
+ $stderr = previous_stderr
end
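Two of the helpers added here are used later in worker_test.rb: Time.fake_time replaces the old $fake_time global for freezing the clock, and capture_stderr swaps $stderr for a StringIO so a test can assert on warning output. A condensed usage sketch, assuming test_helper.rb has already been loaded:

    require 'time'

    # Freeze Time.now, then restore it.
    Time.fake_time = Time.parse("15:44:33 2011-03-02")
    Time.now                     # => 2011-03-02 15:44:33
    Time.fake_time = nil

    # Capture anything written to $stderr inside the block.
    warnings = capture_stderr { $stderr.puts "WARNING: example" }
    warnings                     # => "WARNING: example\n"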
276 test/worker_test.rb
@@ -33,12 +33,53 @@
end
test "fails uncompleted jobs on exit" do
- job = Resque::Job.new(:jobs, [GoodJob, "blah"])
+ job = Resque::Job.new(:jobs, {'class' => 'GoodJob', 'args' => "blah"})
@worker.working_on(job)
@worker.unregister_worker
assert_equal 1, Resque::Failure.count
end
+ class ::SimpleJobWithFailureHandling
+ def self.on_failure_record_failure(exception, *job_args)
+ @@exception = exception
+ end
+
+ def self.exception
+ @@exception
+ end
+ end
+
+ test "fails uncompleted jobs on exit, and calls failure hook" do
+ job = Resque::Job.new(:jobs, {'class' => 'SimpleJobWithFailureHandling', 'args' => ""})
+ @worker.working_on(job)
+ @worker.unregister_worker
+ assert_equal 1, Resque::Failure.count
+ assert(SimpleJobWithFailureHandling.exception.kind_of?(Resque::DirtyExit))
+ end
+
+ class ::SimpleFailingJob
+ @@exception_count = 0
+
+ def self.on_failure_record_failure(exception, *job_args)
+ @@exception_count += 1
+ end
+
+ def self.exception_count
+ @@exception_count
+ end
+
+ def self.perform
+ raise Exception.new
+ end
+ end
+
+ test "only calls failure hook once on exception" do
+ job = Resque::Job.new(:jobs, {'class' => 'SimpleFailingJob', 'args' => ""})
+ @worker.perform(job)
+ assert_equal 1, Resque::Failure.count
+ assert_equal 1, SimpleFailingJob.exception_count
+ end
+
test "can peek at failed jobs" do
10.times { Resque::Job.create(:jobs, BadJob) }
@worker.work(0)
@@ -97,6 +138,36 @@
assert_equal 0, Resque.size(:blahblah)
end
+ test "can work with wildcard at the end of the list" do
+ Resque::Job.create(:high, GoodJob)
+ Resque::Job.create(:critical, GoodJob)
+ Resque::Job.create(:blahblah, GoodJob)
+ Resque::Job.create(:beer, GoodJob)
+
+ worker = Resque::Worker.new(:critical, :high, "*")
+
+ worker.work(0)
+ assert_equal 0, Resque.size(:high)
+ assert_equal 0, Resque.size(:critical)
+ assert_equal 0, Resque.size(:blahblah)
+ assert_equal 0, Resque.size(:beer)
+ end
+
+ test "can work with wildcard at the middle of the list" do
+ Resque::Job.create(:high, GoodJob)
+ Resque::Job.create(:critical, GoodJob)
+ Resque::Job.create(:blahblah, GoodJob)
+ Resque::Job.create(:beer, GoodJob)
+
+ worker = Resque::Worker.new(:critical, "*", :high)
+
+ worker.work(0)
+ assert_equal 0, Resque.size(:high)
+ assert_equal 0, Resque.size(:critical)
+ assert_equal 0, Resque.size(:blahblah)
+ assert_equal 0, Resque.size(:beer)
+ end
+
test "processes * queues in alphabetical order" do
Resque::Job.create(:high, GoodJob)
Resque::Job.create(:critical, GoodJob)
@@ -215,7 +286,7 @@
test "knows when it started" do
time = Time.now
@worker.work(0) do
- assert_equal time.to_s, @worker.started.to_s
+ assert Time.parse(@worker.started) - time < 0.1
end
end
@@ -267,6 +338,11 @@
end
end
+ test "worker_pids returns pids" do
+ known_workers = @worker.worker_pids
+ assert !known_workers.empty?
+ end
+
test "Processed jobs count" do
@worker.work(0)
assert_equal 1, Resque.info[:processed]
@@ -302,16 +378,22 @@
end
test "very verbose works in the afternoon" do
- require 'time'
- $last_puts = ""
- $fake_time = Time.parse("15:44:33 2011-03-02")
- singleton = class << @worker; self end
- singleton.send :define_method, :puts, lambda { |thing| $last_puts = thing }
+ begin
+ require 'time'
+ last_puts = ""
+ Time.fake_time = Time.parse("15:44:33 2011-03-02")
- @worker.very_verbose = true
- @worker.log("some log text")
+ @worker.extend(Module.new {
+ define_method(:puts) { |thing| last_puts = thing }
+ })
- assert_match /\*\* \[15:44:33 2011-03-02\] \d+: some log text/, $last_puts
+ @worker.very_verbose = true
+ @worker.log("some log text")
+
+ assert_match /\*\* \[15:44:33 2011-03-02\] \d+: some log text/, last_puts
+ ensure
+ Time.fake_time = nil
+ end
end
test "Will call an after_fork hook after forking" do
@@ -329,4 +411,178 @@
test "returns PID of running process" do
assert_equal @worker.to_s.split(":")[1].to_i, @worker.pid
end
+
+ test "requeue failed queue" do
+ queue = 'good_job'
+ Resque::Failure.create(:exception => Exception.new, :worker => Resque::Worker.new(queue), :queue => queue, :payload => {'class' => 'GoodJob'})
+ Resque::Failure.create(:exception => Exception.new, :worker => Resque::Worker.new(queue), :queue => 'some_job', :payload => {'class' => 'SomeJob'})
+ Resque::Failure.requeue_queue(queue)
+ assert Resque::Failure.all(0).has_key?('retried_at')
+ assert !Resque::Failure.all(1).has_key?('retried_at')
+ end
+
+ test "remove failed queue" do
+ queue = 'good_job'
+ queue2 = 'some_job'
+ Resque::Failure.create(:exception => Exception.new, :worker => Resque::Worker.new(queue), :queue => queue, :payload => {'class' => 'GoodJob'})
+ Resque::Failure.create(:exception => Exception.new, :worker => Resque::Worker.new(queue2), :queue => queue2, :payload => {'class' => 'SomeJob'})
+ Resque::Failure.create(:exception => Exception.new, :worker => Resque::Worker.new(queue), :queue => queue, :payload => {'class' => 'GoodJob'})
+ Resque::Failure.remove_queue(queue)
+ assert_equal queue2, Resque::Failure.all(0)['queue']
+ assert_equal 1, Resque::Failure.count
+ end
+
+ test "reconnects to redis after fork" do
+ original_connection = Resque.redis.client.connection.instance_variable_get("@sock")
+ @worker.work(0)
+ assert_not_equal original_connection, Resque.redis.client.connection.instance_variable_get("@sock")
+ end
+
+ if !defined?(RUBY_ENGINE) || defined?(RUBY_ENGINE) && RUBY_ENGINE != "jruby"
+ test "old signal handling is the default" do
+ rescue_time = nil
+
+ begin
+ class LongRunningJob
+ @queue = :long_running_job
+
+ def self.perform( run_time, rescue_time=nil )
+ Resque.redis.client.reconnect # get its own connection
+ Resque.redis.rpush( 'sigterm-test:start', Process.pid )
+ sleep run_time
+ Resque.redis.rpush( 'sigterm-test:result', 'Finished Normally' )
+ rescue Resque::TermException => e
+ Resque.redis.rpush( 'sigterm-test:result', %Q(Caught SignalException: #{e.inspect}))
+ sleep rescue_time unless rescue_time.nil?
+ ensure
+ puts 'fuuuu'
+ Resque.redis.rpush( 'sigterm-test:final', 'exiting.' )
+ end
+ end
+
+ Resque.enqueue( LongRunningJob, 5, rescue_time )
+
+ worker_pid = Kernel.fork do
+ # ensure we actually fork
+ $TESTING = false
+ # reconnect since we just forked
+ Resque.redis.client.reconnect
+
+ worker = Resque::Worker.new(:long_running_job)
+
+ worker.work(0)
+ exit!
+ end
+
+ # ensure the worker is started
+ start_status = Resque.redis.blpop( 'sigterm-test:start', 5 )
+ assert_not_nil start_status
+ child_pid = start_status[1].to_i
+ assert_operator child_pid, :>, 0
+
+ # send signal to abort the worker
+ Process.kill('TERM', worker_pid)
+ Process.waitpid(worker_pid)
+
+ # wait to see how it all came down
+ result = Resque.redis.blpop( 'sigterm-test:result', 5 )
+ assert_nil result
+
+ # ensure that the child pid is no longer running
+ child_still_running = !(`ps -p #{child_pid.to_s} -o pid=`).empty?
+ assert !child_still_running
+ ensure
+ remaining_keys = Resque.redis.keys('sigterm-test:*') || []
+ Resque.redis.del(*remaining_keys) unless remaining_keys.empty?
+ end
+ end
+ end
+
+ if !defined?(RUBY_ENGINE) || defined?(RUBY_ENGINE) && RUBY_ENGINE != "jruby"
+ [SignalException, Resque::TermException].each do |exception|
+ {
+ 'cleanup occurs in allotted time' => nil,
+ 'cleanup takes too long' => 2
+ }.each do |scenario,rescue_time|
+ test "SIGTERM when #{scenario} while catching #{exception}" do
+ begin
+ eval("class LongRunningJob; @@exception = #{exception}; end")
+ class LongRunningJob
+ @queue = :long_running_job
+
+ def self.perform( run_time, rescue_time=nil )
+ Resque.redis.client.reconnect # get its own connection
+ Resque.redis.rpush( 'sigterm-test:start', Process.pid )
+ sleep run_time
+ Resque.redis.rpush( 'sigterm-test:result', 'Finished Normally' )
+ rescue @@exception => e
+ Resque.redis.rpush( 'sigterm-test:result', %Q(Caught SignalException: #{e.inspect}))
+ sleep rescue_time unless rescue_time.nil?
+ ensure
+ Resque.redis.rpush( 'sigterm-test:final', 'exiting.' )
+ end
+ end
+
+ Resque.enqueue( LongRunningJob, 5, rescue_time )
+
+ worker_pid = Kernel.fork do
+ # ensure we actually fork
+ $TESTING = false
+ # reconnect since we just forked
+ Resque.redis.client.reconnect
+
+ worker = Resque::Worker.new(:long_running_job)
+ worker.term_timeout = 1
+ worker.term_child = 1
+
+ worker.work(0)
+ exit!
+ end
+
+ # ensure the worker is started
+ start_status = Resque.redis.blpop( 'sigterm-test:start', 5 )
+ assert_not_nil start_status
+ child_pid = start_status[1].to_i
+ assert_operator child_pid, :>, 0
+
+ # send signal to abort the worker
+ Process.kill('TERM', worker_pid)
+ Process.waitpid(worker_pid)
+
+ # wait to see how it all came down
+ result = Resque.redis.blpop( 'sigterm-test:result', 5 )
+ assert_not_nil result
+ assert !result[1].start_with?('Finished Normally'), 'Job Finished normally. Sleep not long enough?'
+ assert result[1].start_with?('Caught SignalException'), 'Signal exception not raised in child.'
+
+ # ensure that the child pid is no longer running
+ child_still_running = !(`ps -p #{child_pid.to_s} -o pid=`).empty?
+ assert !child_still_running
+
+ # see if post-cleanup occurred. This should happen IFF the rescue_time is less than the term_timeout
+ post_cleanup_occurred = Resque.redis.lpop( 'sigterm-test:final' )
+ assert post_cleanup_occurred, 'post cleanup did not occur. SIGKILL sent too early?' if rescue_time.nil?
+ assert !post_cleanup_occurred, 'post cleanup occurred. SIGKILL sent too late?' unless rescue_time.nil?
+
+ ensure
+ remaining_keys = Resque.redis.keys('sigterm-test:*') || []
+ Resque.redis.del(*remaining_keys) unless remaining_keys.empty?
+ end
+ end
+ end
+ end
+
+ test "displays warning when not using term_child" do
+ stderr = capture_stderr { @worker.work(0) }
+
+ assert stderr.match(/^WARNING:/)
+ end
+
+ test "it does not display warning when using term_child" do
+ @worker.term_child = "1"
+ stderr = capture_stderr { @worker.work(0) }
+
+ assert !stderr.match(/^WARNING:/)
+ end
+ end
end