bug777670 - backfills slow jobs with time #743

Merged
merged 3 commits

2 participants

Peter Bengtsson, Chris Lonnen
Peter Bengtsson
Owner

r?

Added tests that attempt to prove that backfillable jobs can specify a time, and that exercise what happens if the last_success key is removed.
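
For context, the job spec format these tests exercise is the pipe-separated string used throughout the diff below: a class path, a frequency, and an optional wall-clock time. A minimal sketch, borrowing names from the test fixtures rather than any real production configuration:

    # hypothetical 'jobs' setting: class path | frequency | optional HH:MM time
    jobs = (
        'socorro.unittest.cron.test_crontabber.SlowBackfillJob|1d|10:00\n'
        'socorro.unittest.cron.test_crontabber.BasicJob|7d'
    )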

Chris Lonnen
Owner

... cannot be merged. Please update and reopen.

Chris Lonnen lonnen closed this
Peter Bengtsson
Owner

nasty http://cl.ly/JlUA

BTW, I prefer to leave it open. Closed, to me, means it's done with; that no more is required or possible.

Peter Bengtsson peterbe reopened this
Chris Lonnen
Owner
socorro/cron/crontabber.py:299: undefined name 'utc'
socorro/cron/crontabber.py:536:27: E251 no spaces around keyword / parameter equals
Peter Bengtsson
Owner

My pep8 on socorro was way out of date. All fixed now for pep8 1.3.3.
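
For anyone following along: pep8's E251 forbids spaces around the '=' in keyword arguments and default parameter values, and the pyflakes complaint came from referencing a name 'utc' that was never imported. A minimal illustration of both fixes (hypothetical code, not the project's; the real change swaps in utcnow(), as the timesince() hunk below shows):

    import datetime

    def log_run(seconds, now=None):  # E251 would flag 'now = None'
        if now is None:
            # sidesteps the undefined name 'utc' with a naive UTC timestamp
            now = datetime.datetime.utcnow()
        return now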

Chris Lonnen
Owner

:shipit:

Chris Lonnen lonnen merged commit 9da90ff
Commits on Oct 19, 2012
  1. Peter Bengtsson
  2. Peter Bengtsson

    fixed merge conflict

    peterbe authored
  3. Peter Bengtsson

    fixed all pep8 nits

    peterbe authored
Showing with 297 additions and 147 deletions.
  1. +60 −49 socorro/cron/crontabber.py
  2. +237 −98 socorro/unittest/cron/test_crontabber.py
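
The core behavioral change in crontabber.py is how a backfillable job decides where to resume: read last_success, fall back to first_run, and warn when neither exists. A condensed, hypothetical paraphrase of that logic — the real code lives in the first hunk below:

    import datetime

    def backfill_dates(job_information, now,
                       frequency=datetime.timedelta(days=1)):
        # resume from last_success, falling back to first_run (legacy records)
        last_success = job_information.get(
            'last_success',
            job_information.get('first_run')
        )
        if not last_success:
            # never ran successfully, or predates the 'first_run' key
            yield now
            return
        when = last_success
        while when + frequency <= now:
            when += frequency
            yield when  # one catch-up run per missed interval

So a job that last succeeded two days ago on a 1d frequency gets two catch-up dates before returning to its normal schedule, which is what the backfill tests simulate by winding first_run and last_success back.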
109 socorro/cron/crontabber.py
@@ -70,13 +70,16 @@ def main(self, function=None, once=True):
yield now
else:
# figure out when it was last run
- last_success = self.job_information.get('last_success',
- self.job_information.get('first_run'))
+ last_success = self.job_information.get(
+ 'last_success',
+ self.job_information.get('first_run')
+ )
if not last_success:
# either it has never run successfully or it was previously run
# before the 'first_run' key was added (legacy).
self.config.logger.warning(
- 'No previous last_success information available')
+ 'No previous last_success information available'
+ )
function(now)
yield now
else:
@@ -264,7 +267,7 @@ def _save_to_postgres(self):
connection.commit()
-def timesince(d, now=None): # pragma: no cover
+def timesince(d, now): # pragma: no cover
"""
Taken from django.utils.timesince
"""
@@ -280,12 +283,12 @@ def is_aware(v):
return v.tzinfo is not None and v.tzinfo.utcoffset(v) is not None
chunks = (
- (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
- (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
- (60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
- (60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
- (60 * 60, lambda n: ungettext('hour', 'hours', n)),
- (60, lambda n: ungettext('minute', 'minutes', n))
+ (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
+ (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
+ (60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
+ (60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
+ (60 * 60, lambda n: ungettext('hour', 'hours', n)),
+ (60, lambda n: ungettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
@@ -294,7 +297,7 @@ def is_aware(v):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
- now = datetime.datetime.now(utc if is_aware(d) else None)
+ now = datetime.datetime.utcnow()
delta = now - d
# ignore microseconds
@@ -307,14 +310,16 @@ def is_aware(v):
if count != 0:
break
s = ugettext('%(number)d %(type)s') % {
- 'number': count, 'type': name(count)}
+ 'number': count, 'type': name(count)
+ }
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
s += ugettext(', %(number)d %(type)s') % {
- 'number': count2, 'type': name2(count2)}
+ 'number': count2, 'type': name2(count2)
+ }
return s
@@ -332,12 +337,11 @@ def _default_extra_extractor(list_element):
def classes_in_namespaces_converter_with_compression(
- reference_namespace={},
- template_for_namespace="class-%(name)s",
- list_splitter_fn=_default_list_splitter,
- class_extractor=_default_class_extractor,
- extra_extractor=_default_extra_extractor
- ):
+ reference_namespace={},
+ template_for_namespace="class-%(name)s",
+ list_splitter_fn=_default_list_splitter,
+ class_extractor=_default_class_extractor,
+ extra_extractor=_default_extra_extractor):
"""
parameters:
template_for_namespace - a template for the names of the namespaces
@@ -392,16 +396,20 @@ class InnerClassList(RequiredConfig):
# for each class in the class list
class_list = []
for namespace_index, class_list_element in enumerate(
- class_str_list):
+ class_str_list
+ ):
try:
a_class = class_converter(
- class_extractor(class_list_element))
+ class_extractor(class_list_element)
+ )
except AttributeError:
raise JobNotFoundError(class_list_element)
class_list.append((a_class.__name__, a_class))
# figure out the Namespace name
- namespace_name_dict = {'name': a_class.__name__,
- 'index': namespace_index}
+ namespace_name_dict = {
+ 'name': a_class.__name__,
+ 'index': namespace_index
+ }
namespace_name = template_for_namespace % namespace_name_dict
subordinate_namespace_names.append(namespace_name)
# create the new Namespace
@@ -447,19 +455,21 @@ def get_extra_as_options(input_str):
frequency, time_ = metadata
n = Namespace()
- n.add_option('frequency',
- doc='frequency',
- default=frequency,
- #from_string_converter=int
- exclude_from_print_conf=True,
- exclude_from_dump_conf=True
- )
- n.add_option('time',
- doc='time',
- default=time_,
- exclude_from_print_conf=True,
- exclude_from_dump_conf=True
- )
+ n.add_option(
+ 'frequency',
+ doc='frequency',
+ default=frequency,
+ #from_string_converter=int
+ exclude_from_print_conf=True,
+ exclude_from_dump_conf=True
+ )
+ n.add_option(
+ 'time',
+ doc='time',
+ default=time_,
+ exclude_from_print_conf=True,
+ exclude_from_dump_conf=True
+ )
return n
@@ -529,14 +539,13 @@ class CronTabber(App):
)
required_config.crontabber.add_option(
- 'jobs',
- default='',
- from_string_converter=
- classes_in_namespaces_converter_with_compression(
- reference_namespace=required_config.crontabber,
- list_splitter_fn=line_splitter,
- class_extractor=pipe_splitter,
- extra_extractor=get_extra_as_options
+ 'jobs',
+ default='',
+ from_string_converter=classes_in_namespaces_converter_with_compression(
+ reference_namespace=required_config.crontabber,
+ list_splitter_fn=line_splitter,
+ class_extractor=pipe_splitter,
+ extra_extractor=get_extra_as_options
)
)
@@ -663,8 +672,10 @@ def run_one(self, description, force=False):
# the description in this case is either the app_name or the full
# module/class reference
for class_name, job_class in self.config.crontabber.jobs.class_list:
- if (job_class.app_name == description or
- description == job_class.__module__ + '.' + job_class.__name__):
+ if (
+ job_class.app_name == description or
+ description == job_class.__module__ + '.' + job_class.__name__
+ ):
class_config = self.config.crontabber['class-%s' % class_name]
self._run_one(job_class, class_config, force=force)
return
@@ -776,9 +787,9 @@ def _log_run(self, class_, seconds, time_, last_success, now,
if exc_type:
tb = ''.join(traceback.format_tb(exc_tb))
info['last_error'] = {
- 'type': exc_type,
- 'value': str(exc_value),
- 'traceback': tb,
+ 'type': exc_type,
+ 'value': str(exc_value),
+ 'traceback': tb,
}
info['error_count'] = info.get('error_count', 0) + 1
else:
335 socorro/unittest/cron/test_crontabber.py
@@ -56,10 +56,10 @@ def test_loading_existing_file(self):
file1 = os.path.join(self.tempdir, 'file1.json')
stuff = {
- 'foo': 1,
- 'more': {
- 'bar': u'Bar'
- }
+ 'foo': 1,
+ 'more': {
+ 'bar': u'Bar'
+ }
}
json.dump(stuff, open(file1, 'w'))
db.load(file1)
@@ -77,8 +77,10 @@ def test_saving_new_file(self):
db['more'] = {'bar': u'Bar'}
db.save(file1)
structure = json.load(open(file1))
- self.assertEqual(structure,
- {u'foo': 1, u'more': {u'bar': u'Bar'}})
+ self.assertEqual(
+ structure,
+ {u'foo': 1, u'more': {u'bar': u'Bar'}}
+ )
# check that save doesn't actually change anything
self.assertEqual(db['foo'], 1)
@@ -134,7 +136,7 @@ def tearDown(self):
def test_basic_run_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BasicJob|7d'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|7d'
)
def fmt(d):
@@ -190,7 +192,7 @@ def fmt(d):
@mock.patch('socorro.cron.crontabber.utc_now')
def test_slow_run_job(self, mocked_utc_now, time_sleep):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.SlowJob|1h'
+ 'socorro.unittest.cron.test_crontabber.SlowJob|1h'
)
_sleeps = []
@@ -214,7 +216,7 @@ def mock_utc_now():
tab.run_all()
time_after = crontabber.utc_now()
time_taken = (time_after - time_before).seconds
- assert round(time_taken) == 1.0, time_taken
+ self.assertEqual(round(time_taken), 1.0)
# check that this was written to the JSON file
# and that the next_run is going to be 1 day from now
@@ -224,12 +226,13 @@ def mock_utc_now():
self.assertEqual(information['error_count'], 0)
self.assertEqual(information['last_error'], {})
self.assertTrue(information['next_run'].startswith(
- (time_before + datetime.timedelta(hours=1))
- .strftime('%Y-%m-%d %H:%M:%S')))
+ (time_before + datetime.timedelta(hours=1))
+ .strftime('%Y-%m-%d %H:%M:%S'))
+ )
def test_run_job_by_class_path(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BasicJob|30m'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|30m'
)
with config_manager.context() as config:
@@ -239,8 +242,8 @@ def test_run_job_by_class_path(self):
def test_basic_run_all(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|3d\n'
- 'socorro.unittest.cron.test_crontabber.BarJob|4d'
+ 'socorro.unittest.cron.test_crontabber.FooJob|3d\n'
+ 'socorro.unittest.cron.test_crontabber.BarJob|4d'
)
with config_manager.context() as config:
@@ -276,7 +279,7 @@ def test_basic_run_all(self):
def test_run_into_error_first_time(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.TroubleJob|7d\n'
+ 'socorro.unittest.cron.test_crontabber.TroubleJob|7d\n'
)
with config_manager.context() as config:
@@ -315,9 +318,9 @@ def test_run_into_error_first_time(self):
def test_run_all_with_failing_dependency(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.TroubleJob|1d\n'
- 'socorro.unittest.cron.test_crontabber.SadJob|1d\n'
- 'socorro.unittest.cron.test_crontabber.BasicJob|1d'
+ 'socorro.unittest.cron.test_crontabber.TroubleJob|1d\n'
+ 'socorro.unittest.cron.test_crontabber.SadJob|1d\n'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|1d'
)
with config_manager.context() as config:
@@ -357,7 +360,7 @@ def test_run_all_with_failing_dependency(self):
def test_run_all_basic_with_failing_dependency_without_errors(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BarJob|1d'
+ 'socorro.unittest.cron.test_crontabber.BarJob|1d'
)
# the BarJob one depends on FooJob but suppose that FooJob
@@ -372,8 +375,8 @@ def test_run_all_basic_with_failing_dependency_without_errors(self):
def test_run_all_with_failing_dependency_without_errors_but_old(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|1d\n'
- 'socorro.unittest.cron.test_crontabber.BarJob|1d'
+ 'socorro.unittest.cron.test_crontabber.FooJob|1d\n'
+ 'socorro.unittest.cron.test_crontabber.BarJob|1d'
)
# the BarJob one depends on FooJob but suppose that FooJob
# has run for but a very long time ago
@@ -394,8 +397,10 @@ def test_run_all_with_failing_dependency_without_errors_but_old(self):
infos = [x[0][0] for x in config.logger.info.call_args_list]
infos = [x for x in infos if x.startswith('Ran ')]
# obvious
- self.assertEqual(infos,
- ['Ran FooJob', 'Ran BarJob', 'Ran FooJob', 'Ran BarJob'])
+ self.assertEqual(
+ infos,
+ ['Ran FooJob', 'Ran BarJob', 'Ran FooJob', 'Ran BarJob']
+ )
# repeat
self._wind_clock(json_file, days=2)
@@ -411,8 +416,8 @@ def test_run_all_with_failing_dependency_without_errors_but_old(self):
def test_basic_run_job_with_hour(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BasicJob|7d|03:00\n'
- 'socorro.unittest.cron.test_crontabber.FooJob|1:45'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|7d|03:00\n'
+ 'socorro.unittest.cron.test_crontabber.FooJob|1:45'
)
with config_manager.context() as config:
@@ -430,10 +435,10 @@ def test_basic_run_job_with_hour(self):
def test_list_jobs(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.SadJob|5h\n'
- 'socorro.unittest.cron.test_crontabber.TroubleJob|1d\n'
- 'socorro.unittest.cron.test_crontabber.BasicJob|7d|03:00\n'
- 'socorro.unittest.cron.test_crontabber.FooJob|2d'
+ 'socorro.unittest.cron.test_crontabber.SadJob|5h\n'
+ 'socorro.unittest.cron.test_crontabber.TroubleJob|1d\n'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|7d|03:00\n'
+ 'socorro.unittest.cron.test_crontabber.FooJob|2d'
)
with config_manager.context() as config:
@@ -447,11 +452,15 @@ def test_list_jobs(self):
sys.stdout = old_stdout
output = new_stdout.getvalue()
self.assertEqual(output.count('Class:'), 4)
- self.assertEqual(4,
- len(re.findall('App name:\s+(trouble|basic-job|foo|sad)',
- output, re.I)))
- self.assertEqual(4,
- len(re.findall('No previous run info', output, re.I)))
+ self.assertEqual(
+ 4,
+ len(re.findall('App name:\s+(trouble|basic-job|foo|sad)',
+ output, re.I))
+ )
+ self.assertEqual(
+ 4,
+ len(re.findall('No previous run info', output, re.I))
+ )
tab.run_all()
assert 'sad' not in tab.database
@@ -467,8 +476,10 @@ def test_list_jobs(self):
sys.stdout = old_stdout
output = new_stdout.getvalue()
# sad job won't be run since its dependency keeps failing
- self.assertEqual(1,
- len(re.findall('No previous run info', output, re.I)))
+ self.assertEqual(
+ 1,
+ len(re.findall('No previous run info', output, re.I))
+ )
# split them up so that we can investigate each block of output
outputs = {}
@@ -489,8 +500,8 @@ def test_list_jobs(self):
def test_configtest_ok(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|3d\n'
- 'socorro.unittest.cron.test_crontabber.BarJob|4d'
+ 'socorro.unittest.cron.test_crontabber.FooJob|3d\n'
+ 'socorro.unittest.cron.test_crontabber.BarJob|4d'
)
with config_manager.context() as config:
@@ -525,7 +536,7 @@ def test_configtest_definition_error(self):
def test_configtest_bad_frequency(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|3e'
+ 'socorro.unittest.cron.test_crontabber.FooJob|3e'
)
with config_manager.context() as config:
@@ -545,8 +556,8 @@ def test_configtest_bad_frequency(self):
def test_configtest_bad_time(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|24:59\n'
- 'socorro.unittest.cron.test_crontabber.BasicJob|23:60'
+ 'socorro.unittest.cron.test_crontabber.FooJob|24:59\n'
+ 'socorro.unittest.cron.test_crontabber.BasicJob|23:60'
)
with config_manager.context() as config:
@@ -566,7 +577,7 @@ def test_configtest_bad_time(self):
def test_configtest_bad_time_invariance(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooJob|3h|23:59'
+ 'socorro.unittest.cron.test_crontabber.FooJob|3h|23:59'
)
with config_manager.context() as config:
@@ -586,7 +597,7 @@ def test_configtest_bad_time_invariance(self):
def test_execute_postgres_based_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
)
with config_manager.context() as config:
@@ -595,17 +606,17 @@ def test_execute_postgres_based_job(self):
config.logger.info.assert_called_with('Ran PostgresSampleJob')
self.psycopg2().cursor().execute.assert_any_call(
- 'INSERT INTO test_cron_victim (time) VALUES (now())'
+ 'INSERT INTO test_cron_victim (time) VALUES (now())'
)
self.psycopg2().cursor().execute.assert_any_call(
- 'COMMIT'
+ 'COMMIT'
)
self.psycopg2().close.assert_called_with()
def test_execute_postgres_transaction_managed_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.'
- 'PostgresTransactionSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber.'
+ 'PostgresTransactionSampleJob|1d'
)
with config_manager.context() as config:
@@ -619,7 +630,7 @@ def test_execute_postgres_transaction_managed_job(self):
def test_execute_failing_postgres_based_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BrokenPostgresSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber.BrokenPostgresSampleJob|1d'
)
with config_manager.context() as config:
@@ -632,12 +643,15 @@ def test_execute_failing_postgres_based_job(self):
self.assertTrue(self.psycopg2.called)
self.psycopg2().close.assert_called_with()
self.assertTrue(tab.database['broken-pg-job']['last_error'])
- self.assertTrue('ProgrammingError' in
- tab.database['broken-pg-job']['last_error']['traceback'])
+ self.assertTrue(
+ 'ProgrammingError' in
+ tab.database['broken-pg-job']['last_error']['traceback']
+ )
def test_own_required_config_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.OwnRequiredConfigSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber'
+ '.OwnRequiredConfigSampleJob|1d'
)
with config_manager.context() as config:
@@ -646,17 +660,18 @@ def test_own_required_config_job(self):
infos = [x[0][0] for x in config.logger.info.call_args_list]
infos = [x for x in infos if x.startswith('Ran ')]
self.assertTrue(
- 'Ran OwnRequiredConfigSampleJob(%r)' % 'bugz.mozilla.org'
- in infos
+ 'Ran OwnRequiredConfigSampleJob(%r)' % 'bugz.mozilla.org'
+ in infos
)
def test_own_required_config_job_overriding_config(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.OwnRequiredConfigSampleJob|1d',
- extra_value_source={
- 'crontabber.class-OwnRequiredConfigSampleJob.bugsy_url':
+ 'socorro.unittest.cron.test_crontabber'
+ '.OwnRequiredConfigSampleJob|1d',
+ extra_value_source={
+ 'crontabber.class-OwnRequiredConfigSampleJob.bugsy_url':
'bugs.peterbe.com'
- }
+ }
)
with config_manager.context() as config:
@@ -665,13 +680,13 @@ def test_own_required_config_job_overriding_config(self):
infos = [x[0][0] for x in config.logger.info.call_args_list]
infos = [x for x in infos if x.startswith('Ran ')]
self.assertTrue(
- 'Ran OwnRequiredConfigSampleJob(%r)' % 'bugs.peterbe.com'
- in infos
+ 'Ran OwnRequiredConfigSampleJob(%r)' % 'bugs.peterbe.com'
+ in infos
)
def test_automatic_backfill_basic_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.FooBackfillJob|1d'
+ 'socorro.unittest.cron.test_crontabber.FooBackfillJob|1d'
)
def fmt(d):
@@ -697,10 +712,12 @@ def fmt(d):
# now, pretend the last 2 days have failed
interval = datetime.timedelta(days=2)
- tab.database['foo-backfill']['first_run'] = \
- tab.database['foo-backfill']['first_run'] - interval
- tab.database['foo-backfill']['last_success'] = \
- tab.database['foo-backfill']['last_success'] - interval
+ tab.database['foo-backfill']['first_run'] = (
+ tab.database['foo-backfill']['first_run'] - interval
+ )
+ tab.database['foo-backfill']['last_success'] = (
+ tab.database['foo-backfill']['last_success'] - interval
+ )
tab.database.save(json_file)
self._wind_clock(json_file, days=1)
@@ -742,7 +759,8 @@ def test_backfilling_failling_midway(self):
"""
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.CertainDayHaterBackfillJob|1d'
+ 'socorro.unittest.cron.test_crontabber'
+ '.CertainDayHaterBackfillJob|1d'
)
with config_manager.context() as config:
tab = crontabber.CronTabber(config)
@@ -752,17 +770,20 @@ def test_backfilling_failling_midway(self):
# now, pretend the last 2 days have failed
interval = datetime.timedelta(days=2)
- tab.database[app_name]['first_run'] = \
- tab.database[app_name]['first_run'] - interval
- tab.database[app_name]['last_success'] = \
- tab.database[app_name]['last_success'] - interval
+ tab.database[app_name]['first_run'] = (
+ tab.database[app_name]['first_run'] - interval
+ )
+ tab.database[app_name]['last_success'] = (
+ tab.database[app_name]['last_success'] - interval
+ )
tab.database.save(json_file)
self._wind_clock(json_file, days=1)
tab._database = None
- CertainDayHaterBackfillJob.fail_on = \
- tab.database[app_name]['first_run'] + interval
+ CertainDayHaterBackfillJob.fail_on = (
+ tab.database[app_name]['first_run'] + interval
+ )
first_last_success = tab.database[app_name]['last_success']
tab.run_all()
@@ -773,7 +794,7 @@ def test_backfilling_failling_midway(self):
def test_backfilling_postgres_based_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PGBackfillJob|1d'
+ 'socorro.unittest.cron.test_crontabber.PGBackfillJob|1d'
)
def fmt(d):
@@ -801,10 +822,12 @@ def fmt(d):
# now, pretend the last 2 days have failed
interval = datetime.timedelta(days=2)
- tab.database['pg-backfill']['first_run'] = \
- tab.database['pg-backfill']['first_run'] - interval
- tab.database['pg-backfill']['last_success'] = \
- tab.database['pg-backfill']['last_success'] - interval
+ tab.database['pg-backfill']['first_run'] = (
+ tab.database['pg-backfill']['first_run'] - interval
+ )
+ tab.database['pg-backfill']['last_success'] = (
+ tab.database['pg-backfill']['last_success'] - interval
+ )
tab.database.save(json_file)
self._wind_clock(json_file, days=1)
@@ -831,8 +854,8 @@ def test_run_with_excess_whitespace(self):
# this test asserts a found bug where excess newlines
# caused configuration exceptions
config_manager, json_file = self._setup_config_manager(
- '\n \n'
- ' socorro.unittest.cron.test_crontabber.BasicJob|7d\n\t \n'
+ '\n \n'
+ ' socorro.unittest.cron.test_crontabber.BasicJob|7d\n\t \n'
)
with config_manager.context() as config:
@@ -880,7 +903,7 @@ def test_backfilling_with_configured_time_slow_job(self,
think 24 hours hasn't gone since the last time. Phew!
"""
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.SlowBackfillJob|1d|18:00'
+ 'socorro.unittest.cron.test_crontabber.SlowBackfillJob|1d|18:00'
)
SlowBackfillJob.times_used = []
@@ -942,6 +965,111 @@ def mock_utc_now():
self.assertTrue('18:01:01' in information['last_run'])
self.assertTrue('18:00:00' in information['last_success'])
+ @mock.patch('socorro.cron.crontabber.utc_now')
+ @mock.patch('time.sleep')
+ def test_slow_backfilled_timed_daily_job(self, time_sleep, mocked_utc_now):
+ config_manager, json_file = self._setup_config_manager(
+ 'socorro.unittest.cron.test_crontabber.SlowBackfillJob|1d|10:00'
+ )
+
+ SlowBackfillJob.times_used = []
+
+ _extra_time = []
+
+ def mocked_sleep(seconds):
+ _extra_time.append(datetime.timedelta(seconds=seconds))
+
+ def mock_utc_now():
+ n = utc_now()
+ for e in _extra_time:
+ n += e
+ return n
+
+ time_sleep.side_effect = mocked_sleep
+ mocked_utc_now.side_effect = mock_utc_now
+
+ with config_manager.context() as config:
+ tab = crontabber.CronTabber(config)
+ time_before = crontabber.utc_now()
+ tab.run_all()
+ assert len(SlowBackfillJob.times_used) == 1
+ time_after = crontabber.utc_now()
+ # double-checking
+ assert (time_after - time_before).seconds == 1
+
+ structure = json.load(open(json_file))
+ information = structure['slow-backfill']
+ self.assertTrue(information['last_success'])
+ self.assertTrue(not information['last_error'])
+ # easy
+ self.assertTrue('10:00:00' in information['next_run'])
+ self.assertEqual(information['first_run'], information['last_run'])
+
+ # pretend one day passes
+ _extra_time.append(datetime.timedelta(days=1))
+ time_later = crontabber.utc_now()
+ assert (time_later - time_after).days == 1
+ assert (time_later - time_after).seconds == 0
+ assert (time_later - time_before).days == 1
+ assert (time_later - time_before).seconds == 1
+
+ tab.run_all()
+ self.assertEqual(len(SlowBackfillJob.times_used), 2)
+ structure = json.load(open(json_file))
+ information = structure['slow-backfill']
+
+ # another day passes
+ _extra_time.append(datetime.timedelta(days=1))
+ # also, simulate that it starts a second earlier this time
+ _extra_time.append(-datetime.timedelta(seconds=1))
+ tab.run_all()
+ assert len(SlowBackfillJob.times_used) == 3
+ structure = json.load(open(json_file))
+ information = structure['slow-backfill']
+
+ @mock.patch('socorro.cron.crontabber.utc_now')
+ @mock.patch('time.sleep')
+ def test_slow_backfilled_timed_daily_job_first_failure(self,
+ time_sleep,
+ mocked_utc_now):
+ config_manager, json_file = self._setup_config_manager(
+ 'socorro.unittest.cron.test_crontabber.SlowBackfillJob|1d|10:00'
+ )
+
+ SlowBackfillJob.times_used = []
+
+ _extra_time = []
+
+ def mocked_sleep(seconds):
+ _extra_time.append(datetime.timedelta(seconds=seconds))
+
+ def mock_utc_now():
+ n = utc_now()
+ for e in _extra_time:
+ n += e
+ return n
+
+ time_sleep.side_effect = mocked_sleep
+ mocked_utc_now.side_effect = mock_utc_now
+
+ with config_manager.context() as config:
+ tab = crontabber.CronTabber(config)
+ tab.run_all()
+ self.assertEqual(len(SlowBackfillJob.times_used), 1)
+
+ db = crontabber.JSONJobDatabase()
+ db.load(json_file)
+ del db['slow-backfill']['last_success']
+ db.save(json_file)
+
+ _extra_time.append(datetime.timedelta(days=1))
+ _extra_time.append(-datetime.timedelta(seconds=1))
+
+ with config_manager.context() as config:
+ tab = crontabber.CronTabber(config)
+ tab.run_all()
+ self.assertEqual(len(SlowBackfillJob.times_used), 2)
+
#==============================================================================
@attr(integration='postgres') # for nosetests
@@ -985,7 +1113,7 @@ def tearDown(self):
def test_postgres_job(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
)
cur = self.conn.cursor()
@@ -1015,14 +1143,18 @@ def test_postgres_job(self):
def test_postgres_job_with_state_loaded_from_postgres_first(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
)
cur = self.conn.cursor()
tomorrow = utc_now() + datetime.timedelta(days=1)
- information = {'sample-pg-job': {
- 'next_run': tomorrow.strftime(crontabber.JSONJobDatabase._date_fmt),
- }}
+ information = {
+ 'sample-pg-job': {
+ 'next_run': tomorrow.strftime(
+ crontabber.JSONJobDatabase._date_fmt
+ ),
+ }
+ }
information_json = json.dumps(information)
cur.execute('update crontabber_state set state=%s',
(information_json,))
@@ -1038,8 +1170,10 @@ def test_postgres_job_with_state_loaded_from_postgres_first(self):
def test_postgres_job_with_broken(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.BrokenPostgresSampleJob|1d\n'
- 'socorro.unittest.cron.test_crontabber.PostgresSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber'
+ '.BrokenPostgresSampleJob|1d\n'
+ 'socorro.unittest.cron.test_crontabber'
+ '.PostgresSampleJob|1d'
)
cur = self.conn.cursor()
@@ -1073,7 +1207,8 @@ def test_postgres_job_with_broken(self):
def test_postgres_job_with_backfill_basic(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PostgresBackfillSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber'
+ '.PostgresBackfillSampleJob|1d'
)
cur = self.conn.cursor()
@@ -1093,7 +1228,8 @@ def test_postgres_job_with_backfill_basic(self):
def test_postgres_job_with_backfill_3_days_back(self):
config_manager, json_file = self._setup_config_manager(
- 'socorro.unittest.cron.test_crontabber.PostgresBackfillSampleJob|1d'
+ 'socorro.unittest.cron.test_crontabber'
+ '.PostgresBackfillSampleJob|1d'
)
def fmt(d):
@@ -1134,10 +1270,12 @@ def fmt(d):
# now, pretend the last 2 days have failed
interval = datetime.timedelta(days=2)
- tab.database[app_name]['first_run'] = \
- tab.database[app_name]['first_run'] - interval
- tab.database[app_name]['last_success'] = \
- tab.database[app_name]['last_success'] - interval
+ tab.database[app_name]['first_run'] = (
+ tab.database[app_name]['first_run'] - interval
+ )
+ tab.database[app_name]['last_success'] = (
+ tab.database[app_name]['last_success'] - interval
+ )
tab.database.save(json_file)
self._wind_clock(json_file, days=1)
@@ -1208,6 +1346,7 @@ def run(self):
time.sleep(1) # time.sleep() is a mock function by the way
super(SlowJob, self).run()
+
class TroubleJob(_Job):
app_name = 'trouble'
@@ -1261,8 +1400,8 @@ class OwnRequiredConfigSampleJob(_Job):
)
def run(self):
- self.config.logger.info("Ran %s(%r)" %
- (self.__class__.__name__, self.config.bugsy_url)
+ self.config.logger.info(
+ "Ran %s(%r)" % (self.__class__.__name__, self.config.bugsy_url)
)
@@ -1272,7 +1411,7 @@ def run(self, date):
assert isinstance(date, datetime.datetime)
assert self.app_name
self.config.logger.info(
- "Ran %s(%s, %s)" % (self.__class__.__name__, date, id(date))
+ "Ran %s(%s, %s)" % (self.__class__.__name__, date, id(date))
)
@@ -1287,7 +1426,7 @@ class CertainDayHaterBackfillJob(_BackfillJob):
def run(self, date):
if (self.fail_on
- and date.strftime('%m%d') == self.fail_on.strftime('%m%d')):
+ and date.strftime('%m%d') == self.fail_on.strftime('%m%d')):
raise Exception("bad date!")
@@ -1317,7 +1456,7 @@ def run(self, connection, date):
# And since the winding back in the test is "unnatural" the numbers
# in the dates are actually the same but the instances are different
self.config.logger.info(
- "Ran %s(%s, %r)" % (self.__class__.__name__, date, id(date))
+ "Ran %s(%s, %r)" % (self.__class__.__name__, date, id(date))
)
@@ -1331,5 +1470,5 @@ def run(self, connection, date):
# need this because this is not a TransactionManaged subclass
cursor.execute('COMMIT')
self.config.logger.info(
- "Ran %s(%s, %r)" % (self.__class__.__name__, date, id(date))
+ "Ran %s(%s, %r)" % (self.__class__.__name__, date, id(date))
)