Permalink
Browse files

Allow successful jobs to (optionally) stay, recording finished_at.

Just like for failed jobs, now we have the option:
Delayed::Job.destroy_successful_jobs = false
  • Loading branch information...
Helder Ribeiro authored and PatrickTulskie committed May 13, 2009
1 parent 82c9740 commit a7aeece3360b0cd20c486cb6f04382d8e7965282
Showing with 61 additions and 7 deletions.
  1. +7 −3 README.textile
  2. +10 −3 lib/delayed/job.rb
  3. +1 −0 spec/database.rb
  4. +43 −1 spec/job_spec.rb
View
@@ -25,24 +25,28 @@ The library evolves around a delayed_jobs table which looks as follows:
table.datetime :locked_at # Set when a client is working on this object
table.datetime :failed_at # Set when all retries have failed (actually, by default, the record is deleted instead)
table.string :locked_by # Who is working on this object (if locked)
+ table.datetime :finished_at # Used for statistics / monitoring
table.timestamps
end
On failure, the job is scheduled again in 5 seconds + N ** 4, where N is the number of retries.
-The default MAX_ATTEMPTS is 25. After this, the job either deleted (default), or left in the database with "failed_at" set.
+The default MAX_ATTEMPTS is 25. After this, the job is either deleted (default), or left in the database with "failed_at" set.
With the default of 25 attempts, the last retry will be 20 days later, with the last interval being almost 100 hours.
The default MAX_RUN_TIME is 4.hours. If your job takes longer than that, another computer could pick it up. It's up to you to
make sure your job doesn't exceed this time. You should set this to the longest time you think the job could take.
-By default, it will delete failed jobs (and it always deletes successful jobs). If you want to keep failed jobs, set
-Delayed::Job.destroy_failed_jobs = false. The failed jobs will be marked with non-null failed_at.
+By default, it will delete failed jobs. If you want to keep failed jobs, set
+@Delayed::Job.destroy_failed_jobs = false@. The failed jobs will be marked with non-null failed_at.
+
+Same thing for successful jobs. They're deleted by default and, to keep them, set @Delayed::Job.destroy_successful_jobs = false@. They will be marked with finished_at. This is useful for gathering statistics like how long jobs took between entering the queue (created_at) and being finished (finished_at).
Here is an example of changing job parameters in Rails:
# config/initializers/delayed_job_config.rb
Delayed::Job.destroy_failed_jobs = false
+ Delayed::Job.destroy_successful_jobs = false
silence_warnings do
Delayed::Job.const_set("MAX_ATTEMPTS", 3)
Delayed::Job.const_set("MAX_RUN_TIME", 5.minutes)
View
@@ -18,13 +18,19 @@ class Job < ActiveRecord::Base
cattr_accessor :destroy_failed_jobs
self.destroy_failed_jobs = true
+ # By default successful jobs are destroyed after finished.
+ # If you want to keep them around (for statistics/monitoring),
+ # set this to false.
+ cattr_accessor :destroy_successful_jobs
+ self.destroy_successful_jobs = true
+
# Every worker has a unique name which by default is the pid of the process.
# There are some advantages to overriding this with something which survives worker restarts:
# Workers can safely resume working on tasks which are locked by themselves. The worker will assume that it crashed before.
cattr_accessor :worker_name
self.worker_name = "host:#{Socket.gethostname} pid:#{Process.pid}" rescue "pid:#{Process.pid}"
- NextTaskSQL = '(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR (locked_by = ?)) AND failed_at IS NULL'
+ NextTaskSQL = '(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR (locked_by = ?)) AND failed_at IS NULL AND finished_at IS NULL'
NextTaskOrder = 'priority DESC, run_at ASC'
ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
@@ -91,9 +97,10 @@ def run_with_lock(max_run_time, worker_name)
begin
runtime = Benchmark.realtime do
- Timeout.timeout(max_run_time.to_i) { invoke_job }
- destroy
+ invoke_job # TODO: raise error if takes longer than max_run_time
end
+ destroy_successful_jobs ? destroy :
+ update_attribute(:finished_at, Time.now)
# TODO: warn if runtime > max_run_time ?
logger.info "* [JOB] #{name} completed after %.4f" % runtime
return true # did work
View
@@ -23,6 +23,7 @@
table.datetime :locked_at
table.string :locked_by
table.datetime :failed_at
+ table.datetime :finished_at
table.timestamps
end
View
@@ -96,7 +96,49 @@ def perform; @@runs += 1; end
M::ModuleJob.runs.should == 1
end
-
+
+ it "should be destroyed if it succeeded and we want to destroy jobs" do
+ default = Delayed::Job.destroy_successful_jobs
+ Delayed::Job.destroy_successful_jobs = true
+
+ Delayed::Job.enqueue SimpleJob.new
+ Delayed::Job.work_off
+
+ Delayed::Job.count.should == 0
+
+ Delayed::Job.destroy_successful_jobs = default
+ end
+
+ it "should be kept if it succeeded and we don't want to destroy jobs" do
+ default = Delayed::Job.destroy_successful_jobs
+ Delayed::Job.destroy_successful_jobs = false
+
+ Delayed::Job.enqueue SimpleJob.new
+ Delayed::Job.work_off
+
+ Delayed::Job.count.should == 1
+
+ Delayed::Job.destroy_successful_jobs = default
+ end
+
+ it "should be finished if it succeeded and we don't want to destroy jobs" do
+ default = Delayed::Job.destroy_successful_jobs
+ Delayed::Job.destroy_successful_jobs = false
+ @job = Delayed::Job.create :payload_object => SimpleJob.new
+
+ @job.reload.finished_at.should == nil
+ Delayed::Job.work_off
+ @job.reload.finished_at.should_not == nil
+
+ Delayed::Job.destroy_successful_jobs = default
+ end
+
+ it "should never find finished jobs" do
+ @job = Delayed::Job.create :payload_object => SimpleJob.new,
+ :finished_at => Time.now
+ Delayed::Job.find_available(1).length.should == 0
+ end
+
it "should re-schedule by about 1 second at first and increment this more and more minutes when it fails to execute properly" do
Delayed::Job.enqueue ErrorJob.new
Delayed::Job.work_off(1)

0 comments on commit a7aeece

Please sign in to comment.