/
request_filter.py
288 lines (229 loc) · 9.99 KB
/
request_filter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os_traits
from oslo_log import log as logging
from oslo_utils import timeutils
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.scheduler import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Prefix for aggregate metadata keys that tie an aggregate to a tenant;
# require_tenant_aggregate matches with key.startswith(), so any key
# beginning with this prefix counts.
TENANT_METADATA_KEY = 'filter_tenant_id'
def trace_request_filter(fn):
    """Decorate a request filter with debug timing.

    The wrapped filter's boolean return value (whether it actually
    applied itself) is passed through unchanged; elapsed time is
    logged at debug level only when the filter reports it ran.
    """
    @functools.wraps(fn)
    def wrapper(ctxt, request_spec):
        stopwatch = timeutils.StopWatch()
        applied = False
        with stopwatch:
            try:
                applied = fn(ctxt, request_spec)
            finally:
                if applied:
                    # Filters that were disabled or excluded themselves
                    # return False and produce no timing noise.
                    LOG.debug('Request filter %r took %.1f seconds',
                              fn.__name__, stopwatch.elapsed())
        return applied
    return wrapper
@trace_request_filter
def isolate_aggregates(ctxt, request_spec):
    """Prepare the list of aggregates that should be isolated.

    Builds the set of aggregates that placement should ignore for this
    request: any aggregate carrying metadata of the form
    'trait:<trait_name>'='required' where <trait_name> is requested by
    neither the flavor extra specs nor the image properties.

    Precisely, the filter takes the union of the trait requests from
    the image and flavor, asks the AggregateList for the aggregates
    whose 'trait:' metadata keys do NOT match that union
    (get_non_matching_by_metadata_keys), and records those aggregates
    as forbidden on the request's destination.
    """
    if not CONF.scheduler.enable_isolated_aggregate_filtering:
        return False

    # Required traits from both the flavor and the image, expressed as
    # the aggregate metadata keys they would satisfy.
    traits = utils.ResourceRequest(request_spec).all_required_traits
    metadata_keys = ['trait:%s' % name for name in traits]

    isolated = (
        objects.aggregate.AggregateList.get_non_matching_by_metadata_keys(
            ctxt, metadata_keys, 'trait:', value='required'))
    if isolated:
        # Attach the forbidden aggregates to the request's destination,
        # creating one if the spec does not have it yet.
        if ('requested_destination' not in request_spec or
                request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()
        request_spec.requested_destination.append_forbidden_aggregates(
            agg.uuid for agg in isolated)
    return True
@trace_request_filter
def require_tenant_aggregate(ctxt, request_spec):
    """Require hosts in an aggregate based on tenant id.

    Modifies request_spec so placement only returns hosts from an
    aggregate dedicated to the requesting tenant. A dedicated aggregate
    is a nova host aggregate with a metadata key starting with
    TENANT_METADATA_KEY whose value is the tenant's project id; the
    matching aggregate uuids are passed to placement to limit results.

    :raises exception.RequestFilterFailed: when no aggregate matches
        and placement_aggregate_required_for_tenants is set.
    """
    enabled = CONF.scheduler.limit_tenants_to_placement_aggregate
    agg_required = CONF.scheduler.placement_aggregate_required_for_tenants
    if not enabled:
        return False

    aggregates = objects.AggregateList.get_by_metadata(
        ctxt, value=request_spec.project_id)
    # An aggregate belongs to this tenant if any of its metadata keys
    # carries the tenant-filter prefix.
    tenant_agg_uuids = {
        agg.uuid for agg in aggregates
        if any(key.startswith(TENANT_METADATA_KEY)
               for key in agg.metadata)
    }

    if tenant_agg_uuids:
        if ('requested_destination' not in request_spec or
                request_spec.requested_destination is None):
            request_spec.requested_destination = objects.Destination()
        request_spec.requested_destination.require_aggregates(
            tenant_agg_uuids)
        LOG.debug('require_tenant_aggregate request filter added '
                  'aggregates %s for tenant %r',
                  ','.join(tenant_agg_uuids),
                  request_spec.project_id)
    elif agg_required:
        LOG.warning('Tenant %(tenant)s has no available aggregates',
                    {'tenant': request_spec.project_id})
        raise exception.RequestFilterFailed(
            reason=_('No hosts available for tenant'))
    return True
@trace_request_filter
def map_az_to_placement_aggregate(ctxt, request_spec):
    """Map requested nova availability zones to placement aggregates.

    Modifies request_spec to require hosts from the aggregate(s) whose
    'availability_zone' metadata matches the AZ the user asked for.
    """
    if not CONF.scheduler.query_placement_for_availability_zone:
        return False

    az_hint = request_spec.availability_zone
    if not az_hint:
        # No AZ requested; nothing to map.
        return False

    matching = objects.AggregateList.get_by_metadata(
        ctxt, key='availability_zone', value=az_hint)
    if not matching:
        return True

    if ('requested_destination' not in request_spec or
            request_spec.requested_destination is None):
        request_spec.requested_destination = objects.Destination()
    agg_uuids = [agg.uuid for agg in matching]
    request_spec.requested_destination.require_aggregates(agg_uuids)
    LOG.debug('map_az_to_placement_aggregate request filter added '
              'aggregates %s for az %r',
              ','.join(agg_uuids),
              az_hint)
    return True
@trace_request_filter
def require_image_type_support(ctxt, request_spec):
    """Request type-specific trait on candidates.

    Modifies request_spec to require hosts exposing the
    COMPUTE_IMAGE_TYPE_* trait matching the disk_format of the image
    being booted.
    """
    if not CONF.scheduler.query_placement_for_image_type_support:
        return False
    if request_spec.is_bfv:
        # Boot-from-volume: the compute node never touches the image
        # disk_format, so its support for it is irrelevant.
        return False

    trait_name = (
        'COMPUTE_IMAGE_TYPE_%s' % request_spec.image.disk_format.upper())
    if not hasattr(os_traits, trait_name):
        # Unknown trait means os-traits predates this image type; fail
        # open rather than constraining on a bogus trait.
        LOG.error(
            'Computed trait name %r is not valid; is os-traits up to date?',
            trait_name)
        return False

    request_spec.root_required.add(trait_name)
    LOG.debug('require_image_type_support request filter added required '
              'trait %s', trait_name)
    return True
@trace_request_filter
def transform_image_metadata(ctxt, request_spec):
    """Transform image metadata to required traits.

    Modifies request_spec to require hosts supporting the
    virtualisation capabilities implied by the image metadata
    properties (bus types, video model, vif model).
    """
    if not CONF.scheduler.image_metadata_prefilter:
        return False

    # Image property name -> trait namespace prefix.
    prefix_map = {
        'hw_cdrom_bus': 'COMPUTE_STORAGE_BUS',
        'hw_disk_bus': 'COMPUTE_STORAGE_BUS',
        'hw_video_model': 'COMPUTE_GRAPHICS_MODEL',
        'hw_vif_model': 'COMPUTE_NET_VIF_MODEL',
    }

    # First pass: compute and validate every trait name; bail out
    # before mutating the spec if any is unknown to os-traits.
    trait_names = []
    for prop, prefix in prefix_map.items():
        if prop not in request_spec.image.properties:
            continue
        suffix = request_spec.image.properties.get(prop).replace(
            '-', '_').upper()
        trait_name = f'{prefix}_{suffix}'
        if not hasattr(os_traits, trait_name):
            LOG.error('Computed trait name %r is not valid; '
                      'is os-traits up to date?', trait_name)
            return False
        trait_names.append(trait_name)

    # Second pass: apply the validated traits.
    for trait_name in trait_names:
        LOG.debug(
            'transform_image_metadata request filter added required '
            'trait %s', trait_name
        )
        request_spec.root_required.add(trait_name)
    return True
@trace_request_filter
def compute_status_filter(ctxt, request_spec):
    """Pre-filter compute node resource providers using COMPUTE_STATUS_DISABLED

    The ComputeFilter filters out hosts for compute services that are
    disabled. Compute node resource providers managed by a disabled compute
    service should have the COMPUTE_STATUS_DISABLED trait set and be excluded
    by this mandatory pre-filter.
    """
    forbidden = os_traits.COMPUTE_STATUS_DISABLED
    request_spec.root_forbidden.add(forbidden)
    LOG.debug('compute_status_filter request filter added forbidden '
              'trait %s', forbidden)
    # Unconditional: this pre-filter always applies.
    return True
@trace_request_filter
def accelerators_filter(ctxt, request_spec):
    """Allow only compute nodes with accelerator support.

    When the flavor requests an accelerator device profile, require the
    COMPUTE_ACCELERATORS trait so only nodes whose compute manager is
    new enough to handle accelerator requests are considered.
    """
    if request_spec.flavor.extra_specs.get('accel:device_profile'):
        trait_name = os_traits.COMPUTE_ACCELERATORS
        request_spec.root_required.add(trait_name)
        LOG.debug('accelerators_filter request filter added required '
                  'trait %s', trait_name)
    return True
# Every known request filter; process_reqspec() runs them in this order.
ALL_REQUEST_FILTERS = [
    require_tenant_aggregate,
    map_az_to_placement_aggregate,
    require_image_type_support,
    compute_status_filter,
    isolate_aggregates,
    transform_image_metadata,
    accelerators_filter,
]
def process_reqspec(ctxt, request_spec):
    """Process an objects.RequestSpec before calling placement.

    Runs every registered request filter in order; each filter may
    modify request_spec in place.

    :param ctxt: A RequestContext
    :param request_spec: An objects.RequestSpec to be inspected/modified
    """
    # Renamed from 'filter' to avoid shadowing the builtin; also fixes
    # the 'ReqestSpec' typo in the docstring.
    for request_filter in ALL_REQUEST_FILTERS:
        request_filter(ctxt, request_spec)