From 783e9b2e13278d9e727054e8937f6f4b65315877 Mon Sep 17 00:00:00 2001
From: Joe Julian
Date: Wed, 17 Feb 2016 12:35:29 -0800
Subject: [PATCH] Rework tests and fix reverse peering with gluster 3.7

Reworked the test structure for the gluster module and state so I could
create a test that mimicked the behavior difference between 3.7 and
prior versions when reverse-probing, by IP address, a server that is
already in the peer list.

Added the additional data given by gluster 3.7 to the output of
glusterfs.list_peers to allow us to compare a requested peering against
a host's aliases.

Fixes #30932
---
 salt/modules/glusterfs.py            |  14 +-
 salt/states/glusterfs.py             |  62 +++--
 tests/unit/modules/glusterfs_test.py | 400 ++++++++++++++++++++++-----
 tests/unit/states/glusterfs_test.py  | 122 +++++---
 4 files changed, 469 insertions(+), 129 deletions(-)

diff --git a/salt/modules/glusterfs.py b/salt/modules/glusterfs.py
index afd61fa73d81..5ebb9272bf07 100644
--- a/salt/modules/glusterfs.py
+++ b/salt/modules/glusterfs.py
@@ -68,9 +68,10 @@ def _gluster_xml(cmd):
 
 
 def _etree_to_dict(t):
-    if len(t.getchildren()) > 0:
+    list_t = list(t)
+    if len(list_t) > 0:
         d = {}
-        for child in t.getchildren():
+        for child in list_t:
             d[child.tag] = _etree_to_dict(child)
     else:
         d = t.text
@@ -117,7 +118,10 @@ def list_peers():
     '''
     root = _gluster_xml('peer status')
 
-    result = [x.find('hostname').text for x in _iter(root, 'peer')]
+    result = {}
+    for et_peer in _iter(root, 'peer'):
+        result.update({et_peer.find('hostname').text: [
+            x.text for x in _iter(et_peer.find('hostnames'), 'hostname')]})
     if len(result) == 0:
         return None
     else:
@@ -161,7 +165,7 @@ def peer(name):
     cmd = 'peer probe {0}'.format(name)
 
     op_result = {
-        "exitval": _gluster_xml(cmd).find('opRet').text,
+        "exitval": _gluster_xml(cmd).find('opErrno').text,
         "output": _gluster_xml(cmd).find('output').text
     }
     return op_result
@@ -349,7 +353,7 @@ def info(name):
         for i, brick in enumerate(_iter(volume, 'brick'), start=1):
             brickkey = 'brick{0}'.format(i)
             bricks[brickkey] = {'path': brick.text}
-            for child in brick.getchildren():
+            for child in list(brick):
                 if not child.tag == 'name':
                     bricks[brickkey].update({child.tag: child.text})
             for k, v in brick.items():
diff --git a/salt/states/glusterfs.py b/salt/states/glusterfs.py
index bb9887253726..73ab5428d4d5 100644
--- a/salt/states/glusterfs.py
+++ b/salt/states/glusterfs.py
@@ -11,9 +11,24 @@
 
 # Import salt libs
 import salt.utils.cloud as suc
+from salt.exceptions import SaltCloudException
 
 log = logging.getLogger(__name__)
 
+RESULT_CODES = [
+    'Peer {0} added successfully.',
+    'Probe on localhost not needed',
+    'Host {0} is already in the peer group',
+    'Host {0} is already part of another cluster',
+    'Volume on {0} conflicts with existing volumes',
+    'UUID of {0} is the same as local uuid',
+    '{0} responded with "unknown peer". This could happen if {0} doesn\'t have localhost defined',
+    'Failed to add peer. Information on {0}\'s logs',
+    'Cluster quorum is not met. Changing peers is not allowed.',
+    'Failed to update list of missed snapshots from {0}',
+    'Conflict comparing list of snapshots from {0}',
+    'Peer is already being detached from cluster.']
+
 
 def __virtual__():
     '''
@@ -48,38 +63,40 @@ def peered(name):
            'comment': '',
            'result': False}
 
+    try:
+        suc.check_name(name, 'a-zA-Z0-9._-')
+    except SaltCloudException:
+        ret['comment'] = 'Invalid characters in peer name.'
+        ret['result'] = False
+        return ret
+
     peers = __salt__['glusterfs.list_peers']()
 
     if peers:
-        if name in peers:
+        if name in peers or any(name in peers[x] for x in peers):
             ret['result'] = True
             ret['comment'] = 'Host {0} already peered'.format(name)
             return ret
-    elif __opts__['test']:
-        ret['comment'] = 'Peer {0} will be added.'.format(name)
-        ret['result'] = None
-        return ret
-
-    if suc.check_name(name, 'a-zA-Z0-9._-'):
-        ret['comment'] = 'Invalid characters in peer name.'
-        ret['result'] = False
-        return ret
-
-    if 'output' in __salt__['glusterfs.peer'](name):
-        ret['comment'] = __salt__['glusterfs.peer'](name)['output']
-    else:
-        ret['comment'] = ''
+    result = __salt__['glusterfs.peer'](name)
+    ret['comment'] = ''
+    if 'exitval' in result:
+        if int(result['exitval']) < len(RESULT_CODES):
+            ret['comment'] = RESULT_CODES[int(result['exitval'])].format(name)
+    else:
+        if 'comment' in result:
+            ret['comment'] = result['comment']
 
     newpeers = __salt__['glusterfs.list_peers']()
-    #if newpeers was null, we know something didn't work.
-    if newpeers and name in newpeers:
+    # if newpeers was null, we know something didn't work.
+    if newpeers and (name in newpeers or any(name in newpeers[x] for x in newpeers)):
         ret['result'] = True
         ret['changes'] = {'new': newpeers, 'old': peers}
-    #In case the hostname doesn't have any periods in it
+    # In case the hostname doesn't have any periods in it
     elif name == socket.gethostname():
         ret['result'] = True
         return ret
-    #In case they have a hostname like "example.com"
+    # In case they have a hostname like "example.com"
    elif name == socket.gethostname().split('.')[0]:
         ret['result'] = True
         return ret
@@ -234,9 +251,9 @@ def add_volume_bricks(name, bricks):
         - host2:/srv/gluster/drive3
     '''
     ret = {'name': name,
-          'changes': {},
-          'comment': '',
-          'result': False}
+           'changes': {},
+           'comment': '',
+           'result': False}
 
     current_bricks = __salt__['glusterfs.status'](name)
 
@@ -257,7 +274,8 @@ def add_volume_bricks(name, bricks):
             old_bricks = current_bricks
             new_bricks = __salt__['glusterfs.status'](name)
             ret['result'] = True
-            ret['changes'] = {'new': list(new_bricks['bricks'].keys()), 'old': list(old_bricks['bricks'].keys())}
+            ret['changes'] = {'new': list(new_bricks['bricks'].keys()), 'old': list(
+                old_bricks['bricks'].keys())}
             return ret
 
         if 'Bricks already in volume' in add_bricks:
diff --git a/tests/unit/modules/glusterfs_test.py b/tests/unit/modules/glusterfs_test.py
index cabac4ad7578..6dd705fb249c 100644
--- a/tests/unit/modules/glusterfs_test.py
+++ b/tests/unit/modules/glusterfs_test.py
@@ -24,12 +24,176 @@
 # Globals
 glusterfs.__salt__ = {}
 
+
+class GlusterResults(object):
+    ''' This class holds the xml results from gluster cli transactions '''
+
+    class v34(object):
+        ''' This is for version 3.4 results '''
+
+        class list_peers(object):
+            ''' results from "peer status" '''
+            pass
+
+        class peer_probe(object):
+            fail_cant_connect = fail_bad_hostname = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '<cliOutput>',
+                '  <opRet>-1</opRet>',
+                '  <opErrno>107</opErrno>',
+                '  <opErrstr>Probe returned with unknown errno 107</opErrstr>',
+                '</cliOutput>',
+                ''])
+
+            success_self = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '  <cliOutput>',
+                '  <opRet>0</opRet>',
+                '  <opErrno>1</opErrno>',
+                '  <opErrstr>(null)</opErrstr>',
+                '  <output>success: on localhost not needed</output>',
+                '</cliOutput>',
+                ''])
+            success_other = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '  <cliOutput>',
+                '  <opRet>0</opRet>',
+                '  <opErrno>0</opErrno>',
+                '  <opErrstr>(null)</opErrstr>',
+                '  <output>success</output>',
+                '</cliOutput>',
+                ''])
+            success_hostname_after_ip = success_other
+            success_ip_after_hostname = success_other
+            success_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr>(null)</opErrstr>',
+                    '  <output>success: host 10.0.0.2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr>(null)</opErrstr>',
+                    '  <output>success: host server2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_reverse_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr>(null)</opErrstr>',
+                    '  <output>success: host 10.0.0.1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr>(null)</opErrstr>',
+                    '  <output>success: host server1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_first_hostname_from_second_first_time = success_other
+            success_first_hostname_from_second_second_time = success_reverse_already_peer[
+                'hostname']
+            success_first_ip_from_second_first_time = success_reverse_already_peer[
+                'ip']
+
+    class v37(object):
+
+        class peer_probe(object):
+            fail_cant_connect = fail_bad_hostname = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '<cliOutput>',
+                '  <opRet>-1</opRet>',
+                '  <opErrno>107</opErrno>',
+                '  <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>',
+                '</cliOutput>',
+                ''])
+            success_self = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '  <cliOutput>',
+                '  <opRet>0</opRet>',
+                '  <opErrno>1</opErrno>',
+                '  <opErrstr/>',
+                '  <output>Probe on localhost not needed</output>',
+                '</cliOutput>',
+                ''])
+            success_other = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '  <cliOutput>',
+                '  <opRet>0</opRet>',
+                '  <opErrno>0</opErrno>',
+                '  <opErrstr/>',
+                '  <output/>',
+                '</cliOutput>',
+                ''])
+            success_hostname_after_ip = success_other
+            success_ip_after_hostname = success_other
+            success_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr/>',
+                    '  <output>Host 10.0.0.2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr/>',
+                    '  <output>Host server2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_reverse_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr/>',
+                    '  <output>Host 10.0.0.1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                    '  <cliOutput>',
+                    '  <opRet>0</opRet>',
+                    '  <opErrno>2</opErrno>',
+                    '  <opErrstr/>',
+                    '  <output>Host server1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_first_hostname_from_second_first_time = success_reverse_already_peer[
+                'hostname']
+            success_first_ip_from_second_first_time = success_other
+            success_first_ip_from_second_second_time = success_reverse_already_peer[
+                'ip']
+
 xml_peer_present = """
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <cliOutput>
   <opRet>0</opRet>
   <peer>
     <hostname>node02</hostname>
+    <hostnames>
+      <hostname>node02.domain.dom</hostname>
+      <hostname>10.0.0.2</hostname>
+    </hostnames>
   </peer>
 </cliOutput>
 """
@@ -147,45 +311,6 @@
 """
 
-xml_peer_probe_success = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>0</opErrno>
-  <opErrstr/>
-  <output/>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_already_member = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>2</opErrno>
-  <opErrstr/>
-  <output>Host salt port 24007 already in peer list</output>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_localhost = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>1</opErrno>
-  <opErrstr/>
-  <output>Probe on localhost not needed</output>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_cant_connect = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>-1</opRet>
-  <opErrno>107</opErrno>
-  <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>
-</cliOutput>
-"""
-
 xml_command_success = """
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <cliOutput>
   <opRet>0</opRet>
@@ -211,13 +336,16 @@ class GlusterfsTestCase(TestCase):
     '''
     # 'list_peers' function tests: 1
 
+    maxDiff = None
+
     def test_list_peers(self):
         '''
         Test if it return a list of gluster peers
         '''
         mock = MagicMock(return_value=xml_peer_present)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertListEqual(glusterfs.list_peers(), ['node02'])
+            self.assertDictEqual(glusterfs.list_peers(), {
+                'node02': ['node02.domain.dom', '10.0.0.2']})
 
         mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -230,29 +358,139 @@ def test_peer(self):
         Test if it adds another node into the peer list.
         '''
 
-        # Peers can be added successfully, already present, be the localhost, or not be connected.
- mock = MagicMock(return_value=xml_peer_probe_success) + # invalid characters + mock = MagicMock(return_value=True) + with patch.object(suc, 'check_name', mock): + self.assertRaises(SaltInvocationError, glusterfs.peer, 'a') + # version 3.4 + # by hostname + # peer failed unknown hostname + # peer failed can't connect + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.fail_cant_connect) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2') + # peer self + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_self) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server1'), + {'exitval': '1', 'output': 'success: on localhost not needed'}) + # peer added + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_other) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server2'), + {'exitval': '0', 'output': 'success'}) + # peer already member + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname']) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server2'), + {'exitval': '2', 'output': 'success: host server2 port 24007 already in peer list'}) + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_already_peer['ip']) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.2'), + {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'}) + # peer in reverse (probe server1 from server2) + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_first_time) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server1'), + {'exitval': '0', 'output': 'success'}) + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_second_time) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server1'), + {'exitval': '2', 'output': 'success: host server1 port 24007 already in peer list'}) + # peer in reverse using ip address instead of hostname + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_reverse_already_peer['ip']) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.1'), + {'exitval': '2', 'output': 'success: host 10.0.0.1 port 24007 already in peer list'}) + # by ip address + # peer self + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_self) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.1'), + {'exitval': '1', 'output': 'success: on localhost not needed'}) + # peer added + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_other) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.2'), + {'exitval': '0', 'output': 'success'}) + # peer already member + mock = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_already_peer['ip']) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.2'), + {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'}) + # version 3.7 + # peer failed unknown hostname + # peer failed can't connect + mock = MagicMock( + 
return_value=GlusterResults.v37.peer_probe.fail_cant_connect) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2') + # peer self + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_self) with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('salt'), + self.assertEqual(glusterfs.peer('server1'), + {'exitval': '1', 'output': 'Probe on localhost not needed'}) + # peer added + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_other) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server2'), {'exitval': '0', 'output': None}) - - mock = MagicMock(return_value=xml_peer_probe_fail_already_member) + # peer already member + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname']) with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('salt'), - {'exitval': '0', 'output': 'Host salt port 24007 already in peer list'}) - - mock = MagicMock(return_value=xml_peer_probe_fail_localhost) + self.assertEqual(glusterfs.peer('server2'), + {'exitval': '2', 'output': 'Host server2 port 24007 already in peer list'}) + # peer in reverse + # by ip address + # peer added + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_other) with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertEqual(glusterfs.peer('salt'), - {'exitval': '0', 'output': 'Probe on localhost not needed'}) - - mock = MagicMock(return_value=xml_peer_probe_fail_cant_connect) + self.assertEqual(glusterfs.peer('10.0.0.2'), + {'exitval': '0', 'output': None}) + # peer already member + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_already_peer['ip']) with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): - self.assertRaises(CommandExecutionError, glusterfs.peer, 'salt') - - mock = MagicMock(return_value=True) - with patch.object(suc, 'check_name', mock): - self.assertRaises(SaltInvocationError, glusterfs.peer, 'a') + self.assertEqual(glusterfs.peer('10.0.0.2'), + {'exitval': '2', 'output': 'Host 10.0.0.2 port 24007 already in peer list'}) + # peer self + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_self) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.1'), + {'exitval': '1', 'output': 'Probe on localhost not needed'}) + # peer in reverse (probe server1 from server2) + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_first_hostname_from_second_first_time) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('server1'), + {'exitval': '2', 'output': 'Host server1 port 24007 already in peer list'}) + # peer in reverse using ip address instead of hostname first time + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.1'), + {'exitval': '0', 'output': None}) + mock = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time) + with patch.dict(glusterfs.__salt__, {'cmd.run': mock}): + self.assertEqual(glusterfs.peer('10.0.0.1'), + {'exitval': '2', 'output': 'Host 10.0.0.1 port 24007 already in peer list'}) # 'create' function tests: 1 @@ -263,17 +501,31 @@ def test_create(self): mock = 
MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.create, 'newvolume', 'host1:brick')
+                SaltInvocationError,
+                glusterfs.create,
+                'newvolume',
+                'host1:brick')
 
         mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.create, 'newvolume', 'host1/brick')
+                SaltInvocationError,
+                glusterfs.create,
+                'newvolume',
+                'host1/brick')
 
         mock = MagicMock(return_value=xml_command_fail)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertRaises(CommandExecutionError, glusterfs.create, 'newvolume', 'host1:/brick',
-                              True, True, True, 'tcp', True)
+            self.assertRaises(
+                CommandExecutionError,
+                glusterfs.create,
+                'newvolume',
+                'host1:/brick',
+                True,
+                True,
+                True,
+                'tcp',
+                True)
 
         mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -440,7 +692,10 @@ def test_delete(self):
         # volume exists, should not be stopped, and is started
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.delete, 'Newvolume1', False)
+                SaltInvocationError,
+                glusterfs.delete,
+                'Newvolume1',
+                False)
 
         # volume exists, should be stopped, and is started
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -465,7 +720,10 @@ def test_add_volume_bricks(self):
         mock = MagicMock(return_value=xml_command_fail)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                CommandExecutionError, glusterfs.add_volume_bricks, 'Newvolume1', ['bricks'])
+                CommandExecutionError,
+                glusterfs.add_volume_bricks,
+                'Newvolume1',
+                ['bricks'])
 
         ret = '1 bricks successfully added to the volume Newvolume1'
         # volume does exist
@@ -483,11 +741,17 @@ def test_add_volume_bricks(self):
         # ... and the added brick does exist
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             # As a list
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', ['bricks']),
-                             'Bricks already in volume Newvolume1')
+            self.assertEqual(
+                glusterfs.add_volume_bricks(
+                    'Newvolume1',
+                    ['bricks']),
+                'Bricks already in volume Newvolume1')
             # As a string
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', 'bricks'),
-                             'Bricks already in volume Newvolume1')
+            self.assertEqual(
+                glusterfs.add_volume_bricks(
+                    'Newvolume1',
+                    'bricks'),
+                'Bricks already in volume Newvolume1')
             # And empty list
             self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', []),
                              'Bricks already in volume Newvolume1')
diff --git a/tests/unit/states/glusterfs_test.py b/tests/unit/states/glusterfs_test.py
index 9644664e7d27..11dae0cdd67c 100644
--- a/tests/unit/states/glusterfs_test.py
+++ b/tests/unit/states/glusterfs_test.py
@@ -19,10 +19,13 @@
 
 # Import Salt Libs
 from salt.states import glusterfs
+from tests.unit.modules.glusterfs_test import GlusterResults
+import salt.modules.glusterfs as mod_glusterfs
+
 import salt.utils.cloud
-import socket
 
-glusterfs.__salt__ = {}
+
+glusterfs.__salt__ = {'glusterfs.peer': mod_glusterfs.peer}
 glusterfs.__opts__ = {}
@@ -37,47 +40,93 @@ def test_peered(self):
         '''
         Test to verify if node is peered.
        '''
-        name = 'salt'
+        name = 'server1'
+
         ret = {'name': name,
                'result': True,
                'comment': '',
                'changes': {}}
 
-        mock = MagicMock(side_effect=[[name], [''], [], [], [], [], [], [],
-                                      [name]])
-        mock_lst = MagicMock(return_value=[])
-        with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock,
-                                             'glusterfs.peer': mock_lst}):
-            comt = ('Host {0} already peered'.format(name))
-            ret.update({'comment': comt})
-            self.assertDictEqual(glusterfs.peered(name), ret)
-
-            with patch.dict(glusterfs.__opts__, {'test': True}):
-                comt = ('Peer {0} will be added.'.format(name))
-                ret.update({'comment': comt, 'result': None})
+        # probe new peer server1 under gluster 3.4.x
+        comt = ('Peer {0} added successfully.'.format(name))
+        ret.update({'comment': comt, 'result': True,
+                    'changes': {'new': {name: []}, 'old': {}}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_other)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
                 self.assertDictEqual(glusterfs.peered(name), ret)
 
-            with patch.object(salt.utils.cloud, 'check_name',
-                              MagicMock(return_value=True)):
-                comt = ('Invalid characters in peer name.')
-                ret.update({'comment': comt, 'result': False})
+        # probe new peer server1 under gluster 3.7.x
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_other)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
                 self.assertDictEqual(glusterfs.peered(name), ret)
 
-            with patch.object(socket, 'gethostname',
-                              MagicMock(side_effect=[name, 'salt.host',
-                                                     'salt.host'])):
-                ret.update({'comment': '', 'result': True})
+        # probe already existing peer server1 under gluster 3.4.x
+        comt = ('Host {0} already peered'.format(name))
+        ret.update({'comment': comt, 'changes': {}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname'])
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: []}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
                 self.assertDictEqual(glusterfs.peered(name), ret)
 
-                comt = ('Host {0} already peered'.format(name))
-                ret.update({'comment': '', 'result': True})
+        # probe already existing peer server1 under gluster 3.7.x
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname'])
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: []}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
                 self.assertDictEqual(glusterfs.peered(name), ret)
 
-                comt = ('Host {0} already peered'.format(name))
-                ret.update({'comment': '', 'result': True,
-                            'changes': {'new': ['salt'], 'old': []}})
-                self.assertDictEqual(glusterfs.peered(name), ret)
+        # Issue 30932: Peering an existing server by IP fails with gluster 3.7+
+        #
+        # server2 was probed by address, 10.0.0.2. Under 3.4, server1 would be
+        # known as 10.0.0.1 but starting with 3.7, its hostname of server1 would be
+        # known instead. Subsequent probing of server1 by server2 used to result in
+        # "success_already_peer" but now it should succeed in adding an alternate
+        # hostname entry.
+ + name = 'server1' + ip = '10.0.0.1' + comt = ('Host {0} already peered'.format(ip)) + ret.update({'name': ip, 'comment': comt, 'changes': {}}) + mock_xml = MagicMock( + return_value=GlusterResults.v34.peer_probe.success_first_ip_from_second_first_time) + with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): + mock = MagicMock(side_effect=[{ip: []}, {ip: []}]) + with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): + self.assertDictEqual(glusterfs.peered(ip), ret) + + comt = ('Peer {0} added successfully.'.format(ip)) + ret.update({'name': ip, 'comment': comt, 'changes': { + 'old': {name: []}, 'new': {name: [ip]}}}) + mock_xml = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time) + with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): + mock = MagicMock(side_effect=[{name: []}, {name: [ip]}]) + with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): + self.assertDictEqual(glusterfs.peered(ip), ret) + + comt = ('Host {0} already peered'.format(ip)) + ret.update({'name': ip, 'comment': comt, 'changes': {}}) + mock_xml = MagicMock( + return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time) + with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}): + mock = MagicMock(side_effect=[{name: [ip]}, {name: [ip]}]) + with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}): + self.assertDictEqual(glusterfs.peered(ip), ret) + + # test for invalid characters + comt = ('Invalid characters in peer name.') + ret.update({'name': '#badhostname', 'comment': comt, 'result': False}) + self.assertDictEqual(glusterfs.peered('#badhostname'), ret) # 'created' function tests: 1 @@ -178,20 +227,25 @@ def test_add_volume_bricks(self): {'glusterfs.status': mock, 'glusterfs.add_volume_bricks': mock_t}): ret.update({'comment': 'does not exist'}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) + self.assertDictEqual( + glusterfs.add_volume_bricks(name, bricks), ret) ret.update({'comment': 'is not started'}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) + self.assertDictEqual( + glusterfs.add_volume_bricks(name, bricks), ret) ret.update({'comment': 'bricks successfully added', 'result': True, 'changes': {'new': ['host1'], 'old': ['host1']}}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) + self.assertDictEqual( + glusterfs.add_volume_bricks(name, bricks), ret) ret.update({'comment': 'Bricks already in volume', 'changes': {}}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) + self.assertDictEqual( + glusterfs.add_volume_bricks(name, bricks), ret) ret.update({'comment': '', 'result': False}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) + self.assertDictEqual( + glusterfs.add_volume_bricks(name, bricks), ret) if __name__ == '__main__':
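
Reviewer note: the snippet below is a minimal, standalone sketch (not part of
the patch) of the alias matching this change enables. The sample XML document,
the PEER_STATUS_37 constant, and the already_peered() helper are illustrative
assumptions for this note, not module API; the real module shells out to the
gluster CLI and parses the reply through _gluster_xml() and _iter().

    import xml.etree.ElementTree as ET

    # Hypothetical `gluster peer status --xml` reply from a 3.7 node; the
    # tag layout mirrors the xml_peer_present fixture in the tests above.
    PEER_STATUS_37 = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <cliOutput>
      <opRet>0</opRet>
      <peer>
        <hostname>node02</hostname>
        <hostnames>
          <hostname>node02.domain.dom</hostname>
          <hostname>10.0.0.2</hostname>
        </hostnames>
      </peer>
    </cliOutput>'''


    def list_peers(xml_text):
        '''Parse peer status XML into {primary_hostname: [alias, ...]},
        the shape the patched glusterfs.list_peers() returns.'''
        result = {}
        for peer in ET.fromstring(xml_text).iter('peer'):
            hostnames = peer.find('hostnames')
            # Pre-3.7 replies carry no <hostnames> element, so fall back
            # to an empty alias list instead of raising AttributeError.
            aliases = [] if hostnames is None else [
                h.text for h in hostnames.iter('hostname')]
            result[peer.find('hostname').text] = aliases
        return result or None


    def already_peered(name, peers):
        '''True when name matches a peer's primary name or any alias --
        the comparison peered() now makes before and after probing.'''
        return bool(peers) and (name in peers or
                                any(name in v for v in peers.values()))


    peers = list_peers(PEER_STATUS_37)
    assert already_peered('node02', peers)      # primary hostname
    assert already_peered('10.0.0.2', peers)    # matched via the 3.7 alias list
    assert not already_peered('10.0.0.9', peers)

With pre-3.7 output the same parse degrades to {primary: []}, which is why
peered() must check both the dict keys and every alias list: a 3.7 server that
was probed back by IP reports that IP only as an alias of its primary hostname.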