/
task_timer_job.rb
53 lines (45 loc) · 1.61 KB
/
task_timer_job.rb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
# frozen_string_literal: true
# Background job that sweeps all TaskTimer records: fires the callback for
# timers that are due, cancels timers flagged for cancellation, and reports
# its total runtime to Datadog. Errors on individual timers are recorded and
# reported without aborting the sweep.
class TaskTimerJob < CaseflowJob
  # For time_ago_in_words()
  include ActionView::Helpers::DateHelper

  queue_with_priority :low_priority
  application_attr :queue

  def perform
    # Run as the system user so downstream callbacks have a current user.
    RequestStore.store[:current_user] = User.system_user

    TaskTimer.requires_processing.each do |task_timer|
      # TODO: if this job's runtime gets too long, spawn individual jobs for each task timer.
      process(task_timer)
    end

    TaskTimer.requires_cancelling.each do |task_timer|
      cancel(task_timer)
    end

    datadog_report_runtime(metric_group_name: TaskTimerJob.name.underscore)
  end

  private

  # Fires the timer's task callback and marks the timer processed.
  # Any failure is recorded on the timer so it is retried on the next run.
  def process(task_timer)
    # Calling ".with_lock" will block the current thread until
    # no other threads have a lock on the row, and will reload
    # the record after acquiring the lock.
    task_timer.with_lock do
      task_timer.attempted!
      task_timer.task.when_timer_ends
      task_timer.clear_error!
      task_timer.processed!
    end
  rescue StandardError => error
    record_error(task_timer, error)
  end

  # Marks the timer canceled. Any failure is recorded on the timer so the
  # cancellation is retried on the next run.
  def cancel(task_timer)
    task_timer.with_lock do
      task_timer.canceled!
    end
  rescue StandardError => error
    record_error(task_timer, error)
  end

  # Ensure errors are sent to Sentry, but don't block the job from continuing.
  # The next time the job runs, we'll retry the affected task timers.
  def record_error(task_timer, error)
    task_timer.update_error!(error.inspect)
    capture_exception(error: error)
  end
end