# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
import collections
from collections import abc
import datetime as dt
import functools
import itertools
import re
import sys
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
import sqlalchemy as sa
from sqlalchemy import MetaData
from sqlalchemy import or_, and_
from sqlalchemy.orm import joinedload, undefer_group, load_only
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy import sql
from sqlalchemy.sql.expression import bindparam
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from sqlalchemy.sql import sqltypes
from cinder.api import common
from cinder.common import sqlalchemyutils
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Map with cases where attach status differs from volume status
ATTACH_STATUS_MAP = {'attached': 'in-use', 'detached': 'available'}
options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
main_context_manager = enginefacade.transaction_context()
def get_engine():
return main_context_manager.writer.get_engine()
def dispose_engine():
get_engine().dispose()
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
raise exception.CinderException(
'Use of empty request context is deprecated'
)
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
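# Illustrative sketch (not part of the original module): the decorators above
# are typically stacked directly on the DB API functions defined below, e.g.:
#
#     @require_admin_context
#     def example_admin_only_destroy(context, resource_id):
#         ...
#
# Calling example_admin_only_destroy (a hypothetical name) with a non-admin
# context raises exception.AdminRequired before the body ever runs.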
###################
@require_context
@main_context_manager.reader
def resource_exists(context, model, resource_id):
conditions = [model.id == resource_id]
# Match non-deleted resources by the id
if 'no' == context.read_deleted:
conditions.append(~model.deleted)
# If the context is not admin we limit it to the context's project
if is_user_context(context) and hasattr(model, 'project_id'):
conditions.append(model.project_id == context.project_id)
query = context.session.query(sql.exists().where(and_(*conditions)))
return query.scalar()
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, volume_id, *args, **kwargs):
if not resource_exists(context, models.Volume, volume_id):
raise exception.VolumeNotFound(volume_id=volume_id)
return f(context, volume_id, *args, **kwargs)
return wrapper
def require_snapshot_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and snapshot_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, snapshot_id, *args, **kwargs):
if not resource_exists(context, models.Snapshot, snapshot_id):
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return f(context, snapshot_id, *args, **kwargs)
return wrapper
def require_backup_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and backup_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, backup_id, *args, **kwargs):
if not resource_exists(context, models.Backup, backup_id):
raise exception.BackupNotFound(backup_id=backup_id)
return f(context, backup_id, *args, **kwargs)
return wrapper
def require_qos_specs_exists(f):
"""Decorator to require the specified QoS speces exist.
Requires the wrapped function to use context and qos_specs_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, qos_specs_id, *args, **kwargs):
if not resource_exists(
context,
models.QualityOfServiceSpecs,
qos_specs_id,
):
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
return f(context, qos_specs_id, *args, **kwargs)
return wrapper
def handle_db_data_error(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exception.Invalid(msg)
return wrapper
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: A request context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = context.session.query(model, *args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
elif read_deleted == 'int_no':
query = query.filter_by(deleted=0)
else:
msg = _("Unrecognized read_deleted value '%s'")
raise Exception(msg % read_deleted)
if project_only and is_user_context(context):
if model is models.VolumeAttachment:
# NOTE(dulek): In case of VolumeAttachment, we need to join
# `project_id` through `volume` relationship.
query = query.filter(
models.Volume.project_id == context.project_id
)
else:
query = query.filter_by(project_id=context.project_id)
return query
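# Illustrative sketch (not part of the original module): DB API functions
# below typically build their queries through model_query so that soft-delete
# visibility and project scoping are applied uniformly; the helper name here
# is hypothetical:
#
#     def _example_volume_get(context, volume_id):
#         return model_query(
#             context, models.Volume, project_only=True,
#         ).filter_by(id=volume_id).first()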
###################
def get_model_for_versioned_object(versioned_object):
if isinstance(versioned_object, str):
model_name = versioned_object
else:
model_name = versioned_object.obj_name()
if model_name == 'BackupImport':
return models.Backup
return getattr(models, model_name)
def _get_get_method(model):
# Exceptions to the model-to-get-method mapping; in general, a method name is
# a simple conversion of the ORM name from camel case to snake case with
# '_get' appended
GET_EXCEPTIONS = {
models.ConsistencyGroup: consistencygroup_get,
models.VolumeType: _volume_type_get_full,
models.QualityOfServiceSpecs: qos_specs_get,
models.GroupType: _group_type_get_full,
models.CGSnapshot: cgsnapshot_get,
}
if model in GET_EXCEPTIONS:
return GET_EXCEPTIONS[model]
# General conversion
# Convert camel cased model name to snake format
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__)
# Get method must be snake formatted model name concatenated with _get
method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get'
return globals().get(method_name)
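# For illustration, the generic conversion above maps a camel-cased model name
# to its snake-cased getter; e.g. for a model named 'VolumeAttachment':
#
#     s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', 'VolumeAttachment')
#     # s == 'Volume_Attachment'
#     re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get'
#     # == 'volume_attachment_get', which is then looked up in globals()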
_GET_METHODS = {}
@require_context
@main_context_manager.reader
def get_by_id(context, model, id, *args, **kwargs):
# Add get method to cache dictionary if it's not already there
if not _GET_METHODS.get(model):
_GET_METHODS[model] = _get_get_method(model)
return _GET_METHODS[model](context, id, *args, **kwargs)
def condition_db_filter(model, field, value):
"""Create matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
If it's not an iterator == operator will be used.
"""
orm_field = getattr(model, field)
# For values that must match and are iterables we use IN
if isinstance(value, abc.Iterable) and not isinstance(value, str):
# We cannot use in_ when one of the values is None
if None not in value:
return orm_field.in_(value)
return or_(orm_field == v for v in value)
# For values that must match and are not iterables we use ==
return orm_field == value
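# For illustration (the status values are hypothetical), condition_db_filter
# yields:
#
#     condition_db_filter(models.Volume, 'status', 'available')
#     # -> models.Volume.status == 'available'
#     condition_db_filter(models.Volume, 'status', ['available', 'error'])
#     # -> models.Volume.status.in_(['available', 'error'])
#     condition_db_filter(models.Volume, 'status', ['available', None])
#     # -> an OR of == comparisons, because SQL IN never matches NULL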
def condition_not_db_filter(model, field, value, auto_none=True):
"""Create non matching filter.
If value is an iterable other than a string, any of the values is
a valid match (OR), so we'll use SQL IN operator.
If it's not an iterator == operator will be used.
If auto_none is True then we'll consider NULL values as different as well,
like we do in Python and not like SQL does.
"""
result = ~condition_db_filter(model, field, value) # pylint: disable=E1130
if auto_none and (
(
isinstance(value, abc.Iterable)
and not isinstance(value, str)
and None not in value
)
or (value is not None)
):
orm_field = getattr(model, field)
result = or_(result, orm_field.is_(None))
return result
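# For illustration, with the default auto_none=True a non-matching filter also
# accepts NULL rows, mirroring Python inequality semantics rather than SQL's:
#
#     condition_not_db_filter(models.Volume, 'status', 'available')
#     # -> or_(~(models.Volume.status == 'available'),
#     #        models.Volume.status.is_(None))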
def is_orm_value(obj):
"""Check if object is an ORM field or expression."""
return isinstance(
obj,
(
sa.orm.attributes.InstrumentedAttribute,
sa.sql.expression.ColumnElement,
),
)
def _check_is_not_multitable(values, model):
"""Check that we don't try to do multitable updates.
Since PostgreSQL doesn't support multitable updates we want to always fail
if we have such a query in our code, even if with MySQL it would work.
"""
used_models = set()
for field in values:
if isinstance(field, sa.orm.attributes.InstrumentedAttribute):
used_models.add(field.class_)
elif isinstance(field, str):
used_models.add(model)
else:
raise exception.ProgrammingError(
reason=(
'DB Conditional update - Unknown field type, must be '
'string or ORM field.'
),
)
if len(used_models) > 1:
raise exception.ProgrammingError(
reason=(
'DB Conditional update - Error in query, multitable '
'updates are not supported.'
),
)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _conditional_update(
context,
model,
values,
expected_values,
filters=None,
include_deleted='no',
project_only=False,
order=None,
):
"""Compare-and-swap conditional update SQLAlchemy implementation."""
_check_is_not_multitable(values, model)
# Provided filters will become part of the where clause
where_conds = list(filters) if filters else []
# Build where conditions with operators ==, !=, NOT IN and IN
for field, condition in expected_values.items():
if not isinstance(condition, db.Condition):
condition = db.Condition(condition, field)
where_conds.append(condition.get_filter(model, field))
# Create the query with the where clause
query = model_query(
context, model, read_deleted=include_deleted, project_only=project_only
).filter(*where_conds)
# NOTE(geguileo): Some DBs' update method are order dependent, and they
# behave differently depending on the order of the values, example on a
# volume with 'available' status:
# UPDATE volumes SET previous_status=status, status='retyping'
# WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
# Will result in a volume with 'retyping' status and 'available'
# previous_status both on SQLite and MariaDB, but
# UPDATE volumes SET status='retyping', previous_status=status
# WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
# Will yield the same result in SQLite but will result in a volume with
# status and previous_status set to 'retyping' in MariaDB, which is not
# what we want, so order must be taken into consideration.
# Order for the update will be:
# 1- Order specified in argument order
# 2- Values that refer to other ORM field (simple and using operations,
# like size + 10)
# 3- Values that use Case clause (since they may be using fields as well)
# 4- All other values
order = list(order) if order else tuple()
orm_field_list = []
case_list = []
unordered_list = []
for key, value in values.items():
if isinstance(value, db.Case):
value = sa.case(
*value.whens,
value=value.value,
else_=value.else_,
)
if key in order:
# pylint: disable=E1137; ("order" is known to be a list, here)
order[order.index(key)] = (key, value)
continue
# NOTE(geguileo): Check Case first since it's a type of orm value
if isinstance(value, sql.elements.Case):
value_list = case_list
elif is_orm_value(value):
value_list = orm_field_list
else:
value_list = unordered_list
value_list.append((key, value))
update_args = {'synchronize_session': False}
# If we don't have to enforce any kind of order just pass along the values
# dictionary since it will be a little more efficient.
if order or orm_field_list or case_list:
# If we are doing an update with ordered parameters, we need to add
# remaining values to the list
values = itertools.chain(
order, orm_field_list, case_list, unordered_list
)
# And we have to tell SQLAlchemy that we want to preserve the order
update_args['update_args'] = {'preserve_parameter_order': True}
# Return True if we were able to change any DB entry, False otherwise
result = query.update(values, **update_args)
return 0 != result
@require_context
@main_context_manager.writer
def conditional_update(
context,
model,
values,
expected_values,
filters=None,
include_deleted='no',
project_only=False,
order=None,
):
"""Compare-and-swap conditional update SQLAlchemy implementation."""
return _conditional_update(
context,
model,
values,
expected_values,
filters=filters,
include_deleted=include_deleted,
project_only=project_only,
order=order,
)
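# Illustrative sketch (not part of the original module): a typical
# compare-and-swap call, mirroring the retype example in the NOTE above:
#
#     changed = conditional_update(
#         context, models.Volume,
#         values={'previous_status': models.Volume.status,
#                 'status': 'retyping'},
#         expected_values={'status': 'available'},
#     )
#
# changed is True only if a row matched the expected status, and the ordering
# logic above guarantees previous_status is assigned before status.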
###################
def _sync_volumes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
volumes, _ = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
snapshots, _ = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
backups, _ = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'backups'
return {key: backups}
def _sync_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, vol_gigs = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
_, snap_gigs = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: vol_gigs + snap_gigs}
def _sync_backup_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
key = 'backup_gigabytes'
_, backup_gigs = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: backup_gigs}
def _sync_groups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, groups = _group_data_get_for_project(context, project_id)
key = 'groups'
return {key: groups}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes,
'_sync_groups': _sync_groups,
}
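# For illustration, each sync function returns a {quota_key: usage} mapping
# that the quota engine uses to refresh usage records; with a hypothetical
# volume type named 'lvm':
#
#     _sync_volumes(context, project_id, volume_type_id=type_id,
#                   volume_type_name='lvm')
#     # -> {'volumes_lvm': <count of that project's volumes of the type>}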
###################
def _clean_filters(filters):
return {k: v for k, v in filters.items() if v is not None}
def _filter_host(field, value, match_level=None):
"""Generate a filter condition for host and cluster fields.
Levels are:
- 'pool': Will search for an exact match
- 'backend': Will search for exact match and value#*
- 'host': Will search for exact match, value@* and value#*
If no level is provided we'll determine it based on the value we want to
match:
- 'pool': If '#' is present in value
- 'backend': If '@' is present in value and '#' is not present
- 'host': In any other case
:param field: ORM field. Ex: objects.Volume.model.host
:param value: String to compare with
:param match_level: 'pool', 'backend', or 'host'
"""
# If we don't set the level we'll try to determine it automatically. LIKE
# operations are expensive, so we try to keep them to a minimum.
if match_level is None:
if '#' in value:
match_level = 'pool'
elif '@' in value:
match_level = 'backend'
else:
match_level = 'host'
# MySQL does not do case-sensitive filtering by default, so we force it
conn_str = CONF.database.connection
if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']:
cmp_value = func.binary(value)
like_op = 'LIKE BINARY'
else:
cmp_value = value
like_op = 'LIKE'
conditions = [field == cmp_value]
if match_level != 'pool':
conditions.append(field.op(like_op)(value + '#%'))
if match_level == 'host':
conditions.append(field.op(like_op)(value + '@%'))
return or_(*conditions)
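# For illustration, a hypothetical value 'node1@lvm' auto-detects as the
# 'backend' level ('@' present, '#' absent), so the generated condition is
# roughly:
#
#     field == 'node1@lvm' OR field LIKE 'node1@lvm#%'
#
# i.e. the exact backend plus any of its pools.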
def _filter_time_comparison(field, time_filter_dict):
"""Generate a filter condition for time comparison operators"""
conditions = []
for operator in time_filter_dict:
filter_time = timeutils.normalize_time(time_filter_dict[operator])
if operator == 'gt':
conditions.append(field.op('>')(filter_time))
elif operator == 'gte':
conditions.append(field.op('>=')(filter_time))
elif operator == 'eq':
conditions.append(field.op('=')(filter_time))
elif operator == 'neq':
conditions.append(field.op('!=')(filter_time))
elif operator == 'lt':
conditions.append(field.op('<')(filter_time))
elif operator == 'lte':
conditions.append(field.op('<=')(filter_time))
return or_(*conditions)
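# For illustration, a hypothetical created_at filter would be passed as:
#
#     _filter_time_comparison(
#         models.Volume.created_at,
#         {'gte': dt.datetime(2023, 1, 1), 'lt': dt.datetime(2024, 1, 1)},
#     )
#
# Note that the conditions are OR'ed together, so stacking operators widens
# the match rather than narrowing it.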
def _clustered_bool_field_filter(query, field_name, filter_value):
# Now that we have clusters, a service is disabled/frozen if the service
# doesn't belong to a cluster or if it belongs to a cluster and the cluster
# itself is disabled/frozen.
if filter_value is not None:
query_filter = or_(
and_(
models.Service.cluster_name.is_(None),
getattr(models.Service, field_name),
),
and_(
models.Service.cluster_name.isnot(None),
sql.exists().where(
and_(
models.Cluster.name == models.Service.cluster_name,
models.Cluster.binary == models.Service.binary,
~models.Cluster.deleted,
getattr(models.Cluster, field_name),
)
),
),
)
if not filter_value:
query_filter = ~query_filter # pylint: disable=E1130
query = query.filter(query_filter)
return query
def _service_query(
context,
read_deleted='no',
host=None,
cluster_name=None,
is_up=None,
host_or_cluster=None,
backend_match_level=None,
disabled=None,
frozen=None,
**filters,
):
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(models.Service, filters):
return None
query = model_query(context, models.Service, read_deleted=read_deleted)
# Host and cluster are particular cases of filters, because we must
# retrieve not only exact matches (single backend configuration), but also
# match those that have the backend defined (multi backend configuration).
if host:
query = query.filter(
_filter_host(models.Service.host, host, backend_match_level)
)
if cluster_name:
query = query.filter(
_filter_host(
models.Service.cluster_name, cluster_name, backend_match_level
)
)
if host_or_cluster:
query = query.filter(
or_(
_filter_host(
models.Service.host, host_or_cluster, backend_match_level
),
_filter_host(
models.Service.cluster_name,
host_or_cluster,
backend_match_level,
),
)
)
query = _clustered_bool_field_filter(query, 'disabled', disabled)
query = _clustered_bool_field_filter(query, 'frozen', frozen)
if filters:
query = query.filter_by(**filters)
if is_up is not None:
date_limit = utils.service_expired_time()
svc = models.Service
filter_ = or_(
and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit),
)
query = query.filter(filter_ == is_up)
return query
@require_admin_context
@main_context_manager.writer
def service_destroy(context, service_id):
query = _service_query(context, id=service_id)
updated_values = models.Service.delete_values()
if not query.update(updated_values):
raise exception.ServiceNotFound(service_id=service_id)
return updated_values
@require_admin_context
@main_context_manager.reader
def service_get(context, service_id=None, backend_match_level=None, **filters):
"""Get a service that matches the criteria.
A possible filter is is_up=True, which will filter out nodes that are down.
:param service_id: Id of the service.
:param filters: Filters for the query in the form of key/value.
:param backend_match_level: 'pool', 'backend', or 'host' for host and
cluster filters (as defined in _filter_host method)
:raise ServiceNotFound: If service doesn't exist.
"""
query = _service_query(
context,
backend_match_level=backend_match_level,
id=service_id,
**filters,
)
service = None if not query else query.first()
if not service:
serv_id = service_id or filters.get('topic') or filters.get('binary')
raise exception.ServiceNotFound(
service_id=serv_id, host=filters.get('host')
)
return service
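# Illustrative sketch (not part of the original module), with hypothetical
# filter values:
#
#     service = service_get(context, host='node1', binary='cinder-volume',
#                           is_up=True)
#
# This raises exception.ServiceNotFound unless a live cinder-volume service
# is registered for that host.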
@require_admin_context
@main_context_manager.reader
def service_get_all(context, backend_match_level=None, **filters):
"""Get all services that match the criteria.
A possible filter is is_up=True, which will filter out nodes that are down.
:param filters: Filters for the query in the form of key/value.
:param backend_match_level: 'pool', 'backend', or 'host' for host and
cluster filters (as defined in _filter_host method)
"""
query = _service_query(
context, backend_match_level=backend_match_level, **filters
)
return [] if not query else query.all()
@require_admin_context
@main_context_manager.reader
def service_get_by_uuid(context, service_uuid):
query = model_query(context, models.Service).filter_by(uuid=service_uuid)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_uuid)
return result
@require_admin_context
@main_context_manager.writer
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
service_ref.save(context.session)
return service_ref
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def service_update(context, service_id, values):
query = _service_query(context, id=service_id)
if 'disabled' in values:
entity = query.column_descriptions[0]['entity']
values = values.copy()
values['modified_at'] = values.get('modified_at', timeutils.utcnow())
values['updated_at'] = values.get('updated_at', entity.updated_at)
result = query.update(values)
if not result:
raise exception.ServiceNotFound(service_id=service_id)
###################
@require_admin_context
@main_context_manager.reader
def is_backend_frozen(context, host, cluster_name):
"""Check if a storage backend is frozen based on host and cluster_name."""
if cluster_name:
model = models.Cluster
conditions = [model.name == volume_utils.extract_host(cluster_name)]
else:
model = models.Service
conditions = [model.host == volume_utils.extract_host(host)]
conditions.extend((~model.deleted, model.frozen))
query = context.session.query(sql.exists().where(and_(*conditions)))
frozen = query.scalar()
return frozen
###################
def _cluster_query(
context,
is_up=None,
get_services=False,
services_summary=False,
read_deleted='no',
name_match_level=None,
name=None,
**filters,
):
filters = _clean_filters(filters)
if filters and not is_valid_model_filters(models.Cluster, filters):
return None
query = model_query(context, models.Cluster, read_deleted=read_deleted)
# Cluster name is a special case of filter, because we must match exact
# names as well as hosts that specify the backend
if name:
query = query.filter(
_filter_host(models.Cluster.name, name, name_match_level)
)