Parallelize quantum unit testing:
This change switches run_tests.sh from nosetests to
testr in parallel mode, so that the unit tests can
run concurrently.

By default, the number of parallel test processes
is set to the number of cores on the test machine.
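
For reference, wiring this up in an OpenStack project of this era meant
adding a .testr.conf that tells testr how to list and run tests, and having
run_tests.sh invoke "testr run --parallel". A rough sketch, with the paths
and environment variables as illustrative assumptions rather than the
literal contents of this commit:

    # .testr.conf (sketch; paths are assumptions)
    [DEFAULT]
    test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./quantum/tests/unit $LISTOPT $IDOPTION
    test_id_option=--load-list $IDFILE
    test_list_option=--list

    # In run_tests.sh, roughly:
    testr run --parallel                  # one worker per core by default
    testr run --parallel --concurrency 4  # or pin the worker count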

A similar change was recently merged to nova:
  https://review.openstack.org/#/c/15078/

Some tests required updating to remove inter-testcase
dependencies.
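
The dependency being removed is ordering: nosetests ran everything in one
process in alphabetical order, so a test named test_a_* could deliberately
leave state behind for a test_b_* sibling, as the cisco tests below did.
testr partitions tests across worker processes, so each test has to create,
and clean up, everything it touches. A minimal sketch of the self-contained
shape, with illustrative names:

    import unittest

    class SelfContainedTest(unittest.TestCase):
        def _create_network(self, net_id):
            # Stand-in for a plugin create_network() call.
            return {'id': net_id}

        def test_delete_network(self):
            # Build the fixture inside the test instead of relying on a
            # previously-run test_a_create_network, then clean up after
            # ourselves regardless of test ordering.
            net = self._create_network('net-1')
            self.addCleanup(net.clear)
            self.assertEqual('net-1', net['id'])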

bug #1099694

Change-Id: Idfb923d424342a07dcba88d70b971683f549f763
echohead committed Apr 3, 2013
1 parent bea61bf commit 147038a
Showing 5 changed files with 174 additions and 114 deletions.
25 changes: 9 additions & 16 deletions quantum/tests/unit/cisco/test_nexus_plugin.py
@@ -84,17 +84,9 @@ def new_nexus_init(self):
 
         self.addCleanup(self.patch_obj.stop)
 
-    def test_a_create_network(self):
+    def test_create_networks(self):
         """
-        Tests creation of two new Virtual Network.
-        Tests deletion of one Virtual Network.
-        This would result the following -
-        The Nexus device should have only one network
-        vlan configured on it's plugin configured
-        interfaces.
-        If running this test individually, run
-        test_nexus_clear_vlan after this test to clean
-        up the second vlan created by this test.
+        Tests creation of two new Virtual Networks.
         """
         tenant_id = self.tenant_id
         net_name = self.net_name
@@ -125,14 +117,15 @@ def test_a_create_network(self):
             self.second_vlan_name)
         self.assertEqual(new_net_dict[const.NET_VLAN_ID], self.second_vlan_id)
 
-    def test_b_nexus_delete_port(self):
+    def test_nexus_delete_port(self):
         """
-        Test to clean up second vlan of nexus device
-        created by test_create_delete_network. This
-        test will fail if it is run individually.
+        Test deletion of a vlan.
         """
+        self._cisco_nexus_plugin.create_network(
+            self.tenant_id, self.net_name, self.net_id, self.vlan_name,
+            self.vlan_id, self._hostname, INSTANCE)
+
         expected_instance_id = self._cisco_nexus_plugin.delete_port(
-            INSTANCE, self.second_vlan_id
-        )
+            INSTANCE, self.vlan_id)
 
         self.assertEqual(expected_instance_id, INSTANCE)
11 changes: 11 additions & 0 deletions quantum/tests/unit/openvswitch/test_ovs_tunnel.py
@@ -297,12 +297,23 @@ def testPortBound(self):
         self.mox.VerifyAll()
 
     def testPortUnbound(self):
+        self.mock_int_bridge.set_db_attribute('Port', VIF_PORT.port_name,
+                                              'tag', str(LVM.vlan))
+        self.mock_int_bridge.delete_flows(in_port=VIF_PORT.ofport)
+
+        action_string = 'mod_vlan_vid:%s,normal' % LV_ID
+        self.mock_tun_bridge.add_flow(priority=3, tun_id=LS_ID,
+                                      dl_dst=VIF_PORT.vif_mac,
+                                      actions=action_string)
+        self.mock_tun_bridge.delete_flows(dl_dst=VIF_MAC, tun_id=LS_ID)
         self.mox.ReplayAll()
 
         a = ovs_quantum_agent.OVSQuantumAgent(self.INT_BRIDGE,
                                               self.TUN_BRIDGE,
                                               '10.0.0.1', self.NET_MAPPING,
                                               'sudo', 2, True)
+        a.local_vlan_map[NET_UUID] = LVM
+        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID)
         a.available_local_vlans = set([LV_ID])
         a.local_vlan_map[NET_UUID] = LVM
         a.port_unbound(VIF_ID, NET_UUID)
5 changes: 3 additions & 2 deletions quantum/tests/unit/test_l3_plugin.py
@@ -645,6 +645,7 @@ def test_router_add_interface_subnet(self):
                              'subnet.create.end',
                              'router.interface.create',
                              'router.interface.delete']
+        test_notifier.NOTIFICATIONS = []
         with self.router() as r:
             with self.subnet() as s:
                 body = self._router_interface_action('add',
@@ -666,8 +667,8 @@ def test_router_add_interface_subnet(self):
                                           expected_code=exc.HTTPNotFound.code)
 
         self.assertEqual(
-            set(n['event_type'] for n in test_notifier.NOTIFICATIONS),
-            set(exp_notifications))
+            set(exp_notifications),
+            set(n['event_type'] for n in test_notifier.NOTIFICATIONS))
 
     def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
         with mock.patch('quantum.context.Context.to_dict') as tdict:
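
The l3 change illustrates the other parallel-safety hazard: module-level
mutable state. test_notifier.NOTIFICATIONS is a list shared by every test
in a worker process, so the test now empties it up front instead of
assuming no earlier test appended to it (the diff also puts the expected
value first in assertEqual, the expected-then-observed convention). A
minimal sketch of the reset pattern, assuming a shared module-level list
and illustrative names:

    import unittest

    NOTIFICATIONS = []  # module-level state shared by tests in one process

    def notify(event_type):
        NOTIFICATIONS.append({'event_type': event_type})

    class NotificationTest(unittest.TestCase):
        def test_events_recorded(self):
            # Clear shared state first: under a parallel runner, which
            # tests ran earlier in this process is nondeterministic.
            del NOTIFICATIONS[:]
            notify('router.interface.create')
            self.assertEqual(
                {'router.interface.create'},
                set(n['event_type'] for n in NOTIFICATIONS))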
