# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_utils.fixture import uuidsentinel as uuids

import nova.conf
from nova import context as nova_context
from nova.scheduler.client import report
from nova.scheduler import host_manager
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova import utils
from nova.virt import fake

CONF = nova.conf.CONF


class AggregatesTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
    ADMIN_API = True

    def _add_hosts_to_aggregate(self):
"""List all compute services and add them all to an aggregate."""
compute_services = [s for s in self.api.get_services()
if s['binary'] == 'nova-compute']
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
for service in compute_services:
self.api.add_host_to_aggregate(agg['id'], service['host'])
        return len(compute_services)

    def test_add_hosts(self):
# Default case with one compute, mapped for us
        self.assertEqual(1, self._add_hosts_to_aggregate())

    def test_add_unmapped_host(self):
"""Ensure that hosts without mappings are still found and added"""
# Add another compute, but nuke its HostMapping
self.start_service('compute', host='compute2')
self.host_mappings['compute2'].destroy()
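        # With its HostMapping gone, the aggregate API has to find the
        # host by searching the cell databases instead of the API database.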
        self.assertEqual(2, self._add_hosts_to_aggregate())


class AggregateRequestFiltersTest(test.TestCase,
                                  integrated_helpers.InstanceHelperMixin):
microversion = 'latest'
    compute_driver = 'fake.MediumFakeDriver'

    def setUp(self):
self.flags(compute_driver=self.compute_driver)
super(AggregateRequestFiltersTest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
placement = self.useFixture(func_fixtures.PlacementFixture())
self.placement_api = placement.api
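        # The placement fixture gives the scheduler a real placement
        # service to query, which the aggregate request filters rely on.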
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.api = self.admin_api
        # The fake image backend is needed for image discovery.
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.scheduler_service = self.start_service('scheduler')
self.computes = {}
self.aggregates = {}
self._start_compute('host1')
self._start_compute('host2')
self.context = nova_context.get_admin_context()
self.report_client = report.SchedulerReportClient()
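        # The report client is used to mirror nova host aggregates into
        # placement, which is what the request filters actually query.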
self.flavors = self.api.get_flavors()
# Aggregate with only host1
self._create_aggregate('only-host1')
self._add_host_to_aggregate('only-host1', 'host1')
# Aggregate with only host2
self._create_aggregate('only-host2')
self._add_host_to_aggregate('only-host2', 'host2')
# Aggregate with neither host
        self._create_aggregate('no-hosts')

    def _start_compute(self, host):
        """Start a nova compute service on the given host.

        :param host: the name of the host that will be associated to the
                     compute service.
        :return: the nova compute service object
        """
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host)
self.computes[host] = compute
        return compute

    def _create_aggregate(self, name):
agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
        self.aggregates[name] = agg

    def _get_provider_uuid_by_host(self, host):
"""Return the compute node uuid for a named compute host."""
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
        return resp['hypervisors'][0]['id']

    def _add_host_to_aggregate(self, agg, host):
        """Add a compute host to both nova and placement aggregates.

        :param agg: Name of the nova aggregate
        :param host: Name of the compute host
        """
agg = self.aggregates[agg]
self.admin_api.add_host_to_aggregate(agg['id'], host)
host_uuid = self._get_provider_uuid_by_host(host)
# Make sure we have a view of the provider we're about to mess with
# FIXME(efried): This should be a thing we can do without internals
self.report_client._ensure_resource_provider(
self.context, host_uuid, name=host)
        self.report_client.aggregate_add_host(self.context, agg['uuid'], host)

    def _wait_for_state_change(self, server, from_status):
        """Poll a server until its status changes away from from_status."""
        for _ in range(50):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(.1)
        return server

    def _boot_server(self, az=None):
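        # The image id below is one of those provided by the fake image
        # service stubbed out in setUp().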
server_req = self._build_minimal_create_server_request(
self.api, 'test-instance', flavor_id=self.flavors[0]['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(created_server, 'BUILD')
        return server

    def _get_instance_host(self, server):
srv = self.admin_api.get_server(server['id'])
        return srv['OS-EXT-SRV-ATTR:host']

    def _set_az_aggregate(self, agg, az):
        """Set the availability_zone of an aggregate.

        :param agg: Name of the nova aggregate
        :param az: Availability zone name
        """
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'availability_zone': az,
}
},
}
        self.admin_api.post_aggregate_action(agg['id'], action)

    def _grant_tenant_aggregate(self, agg, tenants):
        """Grant a set of tenants access to use an aggregate.

        :param agg: Name of the nova aggregate
        :param tenants: A list of all tenant ids that will be allowed access
        """
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'filter_tenant_id%i' % i: tenant
for i, tenant in enumerate(tenants)
}
},
}
        self.admin_api.post_aggregate_action(agg['id'], action)


class TenantAggregateFilterTest(AggregateRequestFiltersTest):
def setUp(self):
super(TenantAggregateFilterTest, self).setUp()
# Default to enabling the filter and making it mandatory
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
                   group='scheduler')

    def test_tenant_id_required_fails_if_no_aggregate(self):
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should fail since aggregates are required
        self.assertEqual('ERROR', server['status'])

    def test_tenant_id_not_required_succeeds_if_no_aggregate(self):
self.flags(placement_aggregate_required_for_tenants=False,
group='scheduler')
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should still succeed since aggregates are not required
        self.assertEqual('ACTIVE', server['status'])

    def test_filter_honors_tenant_id(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with only host1 in it
# and boot some servers. They should all stack up on host1.
self._grant_tenant_aggregate('only-host1',
['foo', tenant, 'bar'])
server1 = self._boot_server()
server2 = self._boot_server()
self.assertEqual('ACTIVE', server1['status'])
self.assertEqual('ACTIVE', server2['status'])
        # Revoke our tenant's access to only-host1, grant it access to the
        # aggregate with only host2 in it, and boot some more servers.
        # They should all stack up on host2.
        self._grant_tenant_aggregate('only-host1',
                                     ['foo', 'bar'])
        self._grant_tenant_aggregate('only-host2',
                                     ['foo', tenant, 'bar'])
server3 = self._boot_server()
server4 = self._boot_server()
self.assertEqual('ACTIVE', server3['status'])
self.assertEqual('ACTIVE', server4['status'])
# Make sure the servers landed on the hosts we had access to at
# the time we booted them.
hosts = [self._get_instance_host(s)
for s in (server1, server2, server3, server4)]
expected_hosts = ['host1', 'host1', 'host2', 'host2']
        self.assertEqual(expected_hosts, hosts)

    def test_filter_with_empty_aggregate(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
server = self._boot_server()
        self.assertEqual('ERROR', server['status'])

    def test_filter_with_multiple_aggregates_for_tenant(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it,
# and one with a host.
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
# Boot several servers and make sure they all land on the
# only host we have access to.
        for _ in range(4):
server = self._boot_server()
self.assertEqual('ACTIVE', server['status'])
            self.assertEqual('host2', self._get_instance_host(server))


class HostNameWeigher(weights.BaseHostWeigher):
    def _weigh_object(self, host_state, weight_properties):
        """Arbitrarily prefer host1 over host2 over host3."""
        host_weights = {'host1': 100, 'host2': 50, 'host3': 1}
        return host_weights.get(host_state.host, 0)


class AvailabilityZoneFilterTest(AggregateRequestFiltersTest):
def setUp(self):
# Default to enabling the filter
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order.
self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler')
        # NOTE(danms): Do this before calling super().setUp() so that
        # the scheduler service that is started sees the new value. The
        # AvailabilityZoneFilter is removed so that only the placement-side
        # az filtering is being tested.
        filters = list(CONF.filter_scheduler.enabled_filters)
        filters.remove('AvailabilityZoneFilter')
        self.flags(enabled_filters=filters, group='filter_scheduler')
        super(AvailabilityZoneFilterTest, self).setUp()

    def test_filter_with_az(self):
self._set_az_aggregate('only-host2', 'myaz')
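        # Even though the HostNameWeigher prefers host1, the az filter
        # should restrict both boots to host2.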
server1 = self._boot_server(az='myaz')
server2 = self._boot_server(az='myaz')
hosts = [self._get_instance_host(s) for s in (server1, server2)]
        self.assertEqual(['host2', 'host2'], hosts)


class TestAggregateFiltersTogether(AggregateRequestFiltersTest):
    def setUp(self):
        # NOTE(danms): Do this before calling super().setUp() so that
        # the scheduler service that is started sees the new value.
        filters = list(CONF.filter_scheduler.enabled_filters)
        filters.remove('AvailabilityZoneFilter')
        self.flags(enabled_filters=filters, group='filter_scheduler')
        super(TestAggregateFiltersTogether, self).setUp()
# Default to enabling both filters
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
        self.flags(query_placement_for_availability_zone=True,
                   group='scheduler')

    def test_tenant_with_az_match(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host1
self._set_az_aggregate('only-host1', 'myaz')
# Boot the server into that az and make sure we land
server = self._boot_server(az='myaz')
        self.assertEqual('host1', self._get_instance_host(server))

    def test_tenant_with_az_mismatch(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Boot the server into that az and make sure we fail
server = self._boot_server(az='myaz')
self.assertIsNone(self._get_instance_host(server))
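        # The request fails with NoValidHost, leaving the instance
        # unscheduled and in ERROR state.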
server = self.api.get_server(server['id'])
        self.assertEqual('ERROR', server['status'])


class TestAggregateMultiTenancyIsolationFilter(
        test.TestCase, integrated_helpers.InstanceHelperMixin):
def _start_compute(self, host):
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host=host)

    def setUp(self):
super(TestAggregateMultiTenancyIsolationFilter, self).setUp()
# Stub out glance, placement and neutron.
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.useFixture(func_fixtures.PlacementFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
# Start nova services.
self.start_service('conductor')
self.admin_api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
        enabled_filters = list(CONF.filter_scheduler.enabled_filters)
        enabled_filters.append('AggregateMultiTenancyIsolation')
        self.flags(enabled_filters=enabled_filters, group='filter_scheduler')
self.start_service('scheduler')
for host in ('host1', 'host2'):
            self._start_compute(host)

    def test_aggregate_multitenancy_isolation_filter(self):
        """Tests common scenarios with the AggregateMultiTenancyIsolation
        filter:

        * hosts in a tenant-isolated aggregate are only accepted for that
          tenant
        * hosts not in a tenant-isolated aggregate are acceptable for all
          tenants, including tenants with access to the isolated-tenant
          aggregate
        """
# Create a tenant-isolated aggregate for the non-admin user.
user_api = self.useFixture(
nova_fixtures.OSAPIFixture(api_version='v2.1',
project_id=uuids.non_admin)).api
agg_id = self.admin_api.post_aggregate(
{'aggregate': {'name': 'non_admin_agg'}})['id']
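        # The AggregateMultiTenancyIsolation filter keys off the
        # filter_tenant_id metadata on the aggregate.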
meta_req = {'set_metadata': {
'metadata': {'filter_tenant_id': uuids.non_admin}}}
self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, meta_req)
# Add host2 to the aggregate; we'll restrict host2 to the non-admin
# tenant.
host_req = {'add_host': {'host': 'host2'}}
self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, host_req)
        # Spy on HostManager.get_filtered_hosts to assert how many host
        # candidates were available per tenant-specific request.
        original_filtered_hosts = (
            host_manager.HostManager.get_filtered_hosts)

        def spy_get_filtered_hosts(*args, **kwargs):
            self.filtered_hosts = original_filtered_hosts(*args, **kwargs)
            return self.filtered_hosts

        self.stub_out(
            'nova.scheduler.host_manager.HostManager.get_filtered_hosts',
            spy_get_filtered_hosts)
# Create a server for the admin - should only have one host candidate.
server_req = self._build_minimal_create_server_request(
self.admin_api,
'test_aggregate_multitenancy_isolation_filter-admin',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
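        # temporary_mutation restores the previous microversion on the api
        # object once the request has been made.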
with utils.temporary_mutation(self.admin_api, microversion='2.37'):
server = self.admin_api.post_server(server_req)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
# Assert it's not on host2 which is isolated to the non-admin tenant.
self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host'])
self.assertEqual(1, len(self.filtered_hosts))
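        # Only host1 remained after filtering since host2 is isolated to
        # the non-admin tenant.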
# Now create a server for the non-admin tenant to which host2 is
# isolated via the aggregate, but the other compute host is a
# candidate. We don't assert that the non-admin tenant server shows
# up on host2 because the other host, which is not isolated to the
# aggregate, is still a candidate.
server_req = self._build_minimal_create_server_request(
user_api,
'test_aggregate_multitenancy_isolation_filter-user',
networks='none') # requires microversion 2.37
server_req = {'server': server_req}
with utils.temporary_mutation(user_api, microversion='2.37'):
server = user_api.post_server(server_req)
self._wait_for_state_change(user_api, server, 'ACTIVE')
self.assertEqual(2, len(self.filtered_hosts))