Merge pull request #550 from openvstorage/AT548-add-workers-restart-workaround

Add temporary workers restart when doing a forced restart of a single…
Philippe committed Jun 29, 2017
2 parents 862e888 + fb126d1 commit b95b4a0
Showing 6 changed files with 68 additions and 14 deletions.
47 changes: 47 additions & 0 deletions ci/scenario_helpers/fwk_handler.py
@@ -0,0 +1,47 @@
# Copyright (C) 2016 iNuron NV
#
# This file is part of Open vStorage Open Source Edition (OSE),
# as available from
#
# http://www.openvstorage.org and
# http://www.openvstorage.com.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
# as published by the Free Software Foundation, in version 3 as it comes
# in the LICENSE.txt file of the Open vStorage OSE distribution.
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.

from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.extensions.generic.sshclient import SSHClient
from ovs.log.log_handler import LogHandler


class FwkHandler(object):
    """
    Class handling framework (fwk) actions, such as restarting the ovs-workers
    """
    LOGGER = LogHandler.get(source='scenario_helpers', name='fwk_handler')

    @classmethod
    def restart(cls, srs, logger=LOGGER):
        """
        Restart the ovs-workers service on the given StorageRouters
        :param srs: StorageRouters to restart the ovs-workers on
        :param logger: logging instance
        """
        for sr in srs:
            logger.info("Restarting ovs-workers on {0}".format(sr.ip))
            client = SSHClient(str(sr.ip), username='root', cached=False)
            client.run(['systemctl', 'restart', 'ovs-workers.service'])

    @classmethod
    def restart_masters(cls):
        """
        Restart the ovs-workers on all master StorageRouters
        """
        cls.restart([sr for sr in StorageRouterList.get_masters()])

    @classmethod
    def restart_slaves(cls):
        """
        Restart the ovs-workers on all slave StorageRouters
        """
        cls.restart([sr for sr in StorageRouterList.get_slaves()])

    @classmethod
    def restart_all(cls):
        """
        Restart the ovs-workers on all StorageRouters, masters first
        """
        cls.restart_masters()
        cls.restart_slaves()
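A minimal usage sketch of the new helper (assuming a deployed OVS cluster with a populated StorageRouter model and root SSH access, which the helper requires):

    # Sketch: restarting the ovs-workers from a test or interactive shell
    from ci.scenario_helpers.fwk_handler import FwkHandler

    FwkHandler.restart_masters()  # workers on master StorageRouters only
    FwkHandler.restart_slaves()   # workers on slave StorageRouters only
    FwkHandler.restart_all()      # whole cluster: masters first, then slaves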
15 changes: 10 additions & 5 deletions ci/scenarios/hypervisor/automated_ha/main.py
@@ -28,6 +28,7 @@
 from ci.autotests import gather_results
 from ci.scenario_helpers.ci_constants import CIConstants
 from ci.scenario_helpers.data_writing import DataWriter
+from ci.scenario_helpers.fwk_handler import FwkHandler
 from ci.scenario_helpers.threading_handlers import ThreadingHandler
 from ci.scenario_helpers.vm_handler import VMHandler
 from ovs.extensions.generic.remote import remote
@@ -163,8 +164,8 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(cluster_info=cluster_info, vm_info=vm_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
-                computenode_hypervisor.sdk.destroy(vm_name)
+                computenode_hypervisor.sdk.undefine(vm_name)
         # cls.test_ha_fio(fio_bin_path, cluster_info, is_ee, api)

@@ -257,12 +258,14 @@ def run_test(cls, vm_info, cluster_info, logger=LOGGER):
                 logger.error('Running the test for configuration {0} has failed because {1}'.format(configuration, str(ex)))
                 failed_configurations.append({'configuration': configuration, 'reason': str(ex)})
             finally:
-                for thread_category, thread_collection in threads['evented'].iteritems():
-                    ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
                 if vm_downed is True:
                     VMHandler.start_vm(parent_hypervisor, vm_to_stop)
                     logger.debug('Started {0}'.format(vm_to_stop))
                     SystemHelper.idle_till_ovs_is_up(source_storagedriver.storage_ip, **cls.get_shell_user())
+                for thread_category, thread_collection in threads['evented'].iteritems():
+                    ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
+                # @TODO: Remove when https://github.com/openvstorage/integrationtests/issues/540 is fixed
+                FwkHandler.restart_all()
                 for vm_name, vm_data in vm_info.iteritems():
                     for screen_name in vm_data.get('screen_names', []):
                         logger.debug('Stopping screen {0} on {1}.'.format(screen_name, vm_data['client'].ip))
@@ -373,14 +376,16 @@ def test_ha_fio(cls, fio_bin_path, cluster_info, is_ee, api, disk_amount=1, tim
             except Exception as ex:
                 failed_configurations.append({'configuration': configuration, 'reason': str(ex)})
             finally:
-                for thread_category, thread_collection in threads['evented'].iteritems():
-                    ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
                 if vm_downed is True:
                     VMHandler.start_vm(parent_hypervisor, vm_to_stop)
                     SystemHelper.idle_till_ovs_is_up(source_storagedriver.storage_ip, **cls.get_shell_user())
+                # @TODO: Remove when https://github.com/openvstorage/integrationtests/issues/540 is fixed
+                FwkHandler.restart_all()
                 if screen_names:
                     for screen_name in screen_names:
                         compute_client.run(['screen', '-S', screen_name, '-X', 'quit'])
+                for thread_category, thread_collection in threads['evented'].iteritems():
+                    ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
                 for vdisk in vdisk_info.values():
                     VDiskRemover.remove_vdisk(vdisk.guid, api)
         assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(' '.join(failed_configurations))
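Both reworked finally blocks follow the same order: revive the downed VM and wait until OVS is responsive, only then stop the evented threads, and finish with the workers restart that works around issue 540. A condensed sketch of that order (identifiers taken from the diff; storage_ip and shell_user stand in for the per-test values):

    # Sketch: teardown order shared by the reworked finally blocks
    if vm_downed is True:
        VMHandler.start_vm(parent_hypervisor, vm_to_stop)           # revive the VM first
        SystemHelper.idle_till_ovs_is_up(storage_ip, **shell_user)  # wait until OVS answers
    for thread_category, thread_collection in threads['evented'].iteritems():
        ThreadHelper.stop_evented_threads(thread_collection['pairs'],      # stop threads only
                                          thread_collection['r_semaphore'])  # after OVS is back
    FwkHandler.restart_all()  # temporary workaround for issue 540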
3 changes: 1 addition & 2 deletions ci/scenarios/hypervisor/live_migrate_vm_test-exclude/main.py
@@ -108,9 +108,8 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.live_migrate(vm_info, cluster_info, volume_amount, hypervisor_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
-            for vm_name in vm_info.keys():
-                source_hypervisor.sdk.destroy(vm_name)
+                source_hypervisor.sdk.undefine(vm_name)

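Every scenario swaps sdk.destroy for sdk.undefine in its cleanup. In libvirt terms, destroy() force-stops a running domain while undefine() removes its persistent definition; since these tests have already taken the VM down, removing the leftover definition is the right cleanup. A standalone sketch of the distinction using libvirt-python directly (the repo wraps this in its hypervisor SDK; the URI and domain name here are hypothetical):

    # Sketch: destroy vs. undefine with plain libvirt-python
    import libvirt

    conn = libvirt.open('qemu:///system')  # hypothetical local KVM URI
    dom = conn.lookupByName('testvm-01')   # hypothetical domain name
    if dom.isActive():
        dom.destroy()                      # force power-off, keeps the definition
    dom.undefine()                         # remove the persistent definition
    conn.close()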
13 changes: 8 additions & 5 deletions ci/scenarios/vDisk/advanced_dtl_vdisk_test/main.py
@@ -24,6 +24,7 @@
 from ci.autotests import gather_results
 from ci.scenario_helpers.ci_constants import CIConstants
 from ci.scenario_helpers.data_writing import DataWriter
+from ci.scenario_helpers.fwk_handler import FwkHandler
 from ci.scenario_helpers.threading_handlers import ThreadingHandler
 from ci.scenario_helpers.vm_handler import VMHandler
 from ovs.extensions.generic.remote import remote
@@ -34,7 +35,7 @@

 class AdvancedDTLTester(CIConstants):
     """
-    Exercice HA with a VM via edge & KVM
+    Trigger HA with a VM via edge & KVM
     Required packages: qemu-kvm libvirt0 python-libvirt virtinst genisoimage
     Required commands after ovs installation and required packages: usermod -a -G ovs libvirt-qemu
@@ -113,8 +114,8 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(vm_info=vm_info, cluster_info=cluster_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
-                computenode_hypervisor.sdk.destroy(vm_name)
+                computenode_hypervisor.sdk.undefine(vm_name)

@@ -220,7 +221,7 @@ def run_test(cls, vm_info, cluster_info, logger=LOGGER):
             vm_data['client'].run(['dd', 'if=/dev/urandom', 'of={0}'.format(cls.VM_RANDOM), 'bs=1M', 'count=2'])
             vm_data['client'].run(['md5sum', cls.VM_RANDOM])

-        logger.error("Starting to stop proxy services")
+        logger.info("Stopping proxy services")
         for proxy in source_std.alba_proxies:
             ServiceManager.restart_service(proxy.service.name, client=source_client)

@@ -276,12 +277,14 @@ def run_test(cls, vm_info, cluster_info, logger=LOGGER):
             assert len(unmatching_checksum_vms) == 0, 'Not all data was read from the DTL. Checksums do not line up for {}'.format(', '.join(unmatching_checksum_vms))
             logger.info('DTL is working correctly!')
         finally:
-            for thread_category, thread_collection in threads['evented'].iteritems():
-                ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
             if vm_downed is True:
                 VMHandler.start_vm(parent_hypervisor, vm_to_stop)
                 logger.debug('Started {0}'.format(vm_to_stop))
                 SystemHelper.idle_till_ovs_is_up(source_std.storage_ip, **cls.get_shell_user())
+            for thread_category, thread_collection in threads['evented'].iteritems():
+                ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
+            # @TODO: Remove when https://github.com/openvstorage/integrationtests/issues/540 is fixed
+            FwkHandler.restart_all()
             for vm_name, vm_data in vm_info.iteritems():
                 for screen_name in vm_data.get('screen_names', []):
                     logger.debug('Stopping screen {0} on {1}.'.format(screen_name, vm_data['client'].ip))
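The verification behind the unmatching_checksum_vms assert is a plain checksum round-trip: write known random data inside the VM, record its md5sum, force the failover, then re-read and compare. A simplified sketch of that idea (assuming the client's run() returns the command's stdout, as the surrounding code suggests; the file path is hypothetical, the test itself uses cls.VM_RANDOM):

    # Sketch: checksum round-trip that the DTL test relies on
    test_file = '/root/dtl_random_data'  # hypothetical path
    before = vm_client.run(['md5sum', test_file]).split()[0]
    # ... force the source node down; HA moves the volume, reads hit the DTL ...
    after = vm_client.run(['md5sum', test_file]).split()[0]
    if before != after:
        unmatching_checksum_vms.append(vm_name)  # data diverged after failover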
2 changes: 1 addition & 1 deletion ci/scenarios/vDisk/data_corruption_reg_test/main.py
@@ -112,8 +112,8 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(storagedriver=storagedriver, vm_info=vm_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
-                computenode_hypervisor.sdk.destroy(vm_name)
+                computenode_hypervisor.sdk.undefine(vm_name)

2 changes: 1 addition & 1 deletion ci/scenarios/vPool/mds_regression/main.py
@@ -110,8 +110,8 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
                                   api=api)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
                 VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
-                computenode_hypervisor.sdk.destroy(vm_name)
+                computenode_hypervisor.sdk.undefine(vm_name)

