Skip to content

Commit

Permalink
Merge pull request #570 from oldpatricka/fix_leaky_ha
Browse files Browse the repository at this point in the history
Fix HA tests that leak processes
  • Loading branch information
labisso committed Feb 14, 2013
2 parents f114d32 + 3d90a58 commit 950212e
Showing 1 changed file with 44 additions and 27 deletions.
71 changes: 44 additions & 27 deletions ion/agents/cei/test/test_haagent.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,14 @@ def _kill_haagent(self):
self.container.terminate_process(self._haa_pid)

def tearDown(self):


new_policy = {'preserve_n': 0}
self.haa_client.reconfigure_policy(new_policy)

self.assertEqual(len(self.get_running_procs()), 0)
self.await_ha_state('STEADY')

self.waiter.stop()
try:
self._kill_haagent()
Expand Down Expand Up @@ -546,6 +554,23 @@ def _stop_webserver(self):
gevent.sleep(2)
self._web_glet.kill()

def await_ha_state(self, want_state, timeout=20):
    """Poll the HA agent until it reports *want_state*.

    Retries roughly once per second for up to *timeout* attempts.
    Transient failures while querying status are logged and retried
    rather than aborting, since the agent may be mid-reconfiguration.

    @param want_state  HA state string to wait for (e.g. 'STEADY')
    @param timeout     maximum number of one-second polling attempts
    @raise Exception   if the HA agent never reaches want_state
    """
    for _ in range(timeout):
        try:
            status = self.haa_client.status().result
            if status == want_state:
                return
            else:
                # Not there yet: log current state and managed-proc
                # count to help debug slow convergence. Use lazy
                # logger args so formatting only happens when the
                # debug level is enabled.
                procs = self.get_running_procs()
                num_procs = len(procs)
                log.debug("assert wants state %s, got state %s, with %s procs",
                          want_state, status, num_procs)
        except Exception:
            log.exception("Problem getting HA status, trying again...")
        gevent.sleep(1)

    raise Exception("Took more than %s seconds to get to ha state %s" % (
        timeout, want_state))

@needs_epu
def setUp(self):
self._start_container()
Expand Down Expand Up @@ -619,6 +644,22 @@ def setUp(self):
self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)

def tearDown(self):
new_policy = { 'metric': 'app_attributes:ml',
'sample_period': 600,
'sample_function': 'Average',
'cooldown_period': 0,
'scale_up_threshold': 2.0,
'scale_up_n_processes': 1,
'scale_down_threshold': 1.0,
'scale_down_n_processes': 1,
'maximum_processes': 0,
'minimum_processes': 0,
}
self.haa_client.reconfigure_policy(new_policy)

self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
self.assertEqual(len(self.get_running_procs()), 0)

self.waiter.stop()
self.container.terminate_process(self._haa_pid)
self._stop_webserver()
Expand Down Expand Up @@ -654,15 +695,7 @@ def test_sensor_policy(self):

self.assertEqual(len(self.get_running_procs()), 1)

for i in range(0, 5):
status = self.haa_client.status().result
try:
self.assertEqual(status, 'STEADY')
break
except:
gevent.sleep(1)
else:
assert False, "HA Service took too long to get to state STEADY"
self.await_ha_state('STEADY')

# Set ml for each proc such that we scale up
upids = self._get_managed_upids()
Expand All @@ -684,15 +717,7 @@ def test_sensor_policy(self):

self.assertEqual(len(self.get_running_procs()), 2)

for i in range(0, 5):
status = self.haa_client.status().result
try:
self.assertEqual(status, 'STEADY')
break
except:
gevent.sleep(1)
else:
assert False, "HA Service took too long to get to state STEADY"
self.await_ha_state('STEADY')

# Set ml so we scale down
upids = self._get_managed_upids()
Expand All @@ -705,12 +730,4 @@ def test_sensor_policy(self):

self.assertEqual(len(self.get_running_procs()), 1)

for i in range(0, 5):
status = self.haa_client.status().result
try:
self.assertEqual(status, 'STEADY')
break
except:
gevent.sleep(1)
else:
assert False, "HA Service took too long to get to state STEADY"
self.await_ha_state('STEADY')

0 comments on commit 950212e

Please sign in to comment.