# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to volumes."""
import ast
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import versionutils
import six
from cinder.api import common
from cinder.common import constants
from cinder import context
from cinder import db
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import keymgr as key_manager
from cinder import objects
from cinder.objects import base as objects_base
from cinder.objects import fields
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume.flows.api import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types


allow_force_upload_opt = cfg.BoolOpt('enable_force_upload',
                                     default=False,
                                     help='Enables the Force option on '
                                          'upload_to_image. This enables '
                                          'running upload_volume on in-use '
                                          'volumes for backends that '
                                          'support it.')
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
                              default=True,
                              help='Create volume from snapshot at the host '
                                   'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
                                 default=True,
                                 help='Ensure that the new volumes are the '
                                      'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
                               default=3600,
                               help='Cache volume availability zones in '
                                    'memory for the provided duration in '
                                    'seconds')
CONF = cfg.CONF
CONF.register_opt(allow_force_upload_opt)
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
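
# Illustrative only: the options registered above are read from the
# [DEFAULT] section of cinder.conf; the snippet below simply restates
# their defaults.
#
#   [DEFAULT]
#   enable_force_upload = false
#   snapshot_same_host = true
#   cloned_volume_same_az = true
#   az_cache_duration = 3600
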
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
AO_LIST = objects.VolumeAttachmentList


def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, volume)
    """
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return wrapped
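

# Illustrative only: wrap_check_policy is applied to API methods whose first
# three positional arguments are (self, context, volume), for example:
#
#     @wrap_check_policy
#     def delete(self, context, volume, ...):
#         ...
#
# so that check_policy() runs with the volume as the enforcement target
# before the method body executes (see delete() and update() below).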


def check_policy(context, action, target_obj=None):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }

    if isinstance(target_obj, objects_base.CinderObject):
        # Turn object into dict so target.update can work
        target.update(
            target_obj.obj_to_primitive()['versioned_object.data'] or {})
    else:
        target.update(target_obj or {})

    _action = 'volume:%s' % action
    cinder.policy.enforce(context, _action, target)
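

# For example, check_policy(context, 'delete', volume) enforces the
# 'volume:delete' policy rule against a target dict that carries the
# request's project_id and user_id merged with the volume's fields.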


class API(base.Base):
    """API for interacting with the volume manager."""

    AVAILABLE_MIGRATION_STATUS = (None, 'deleting', 'error', 'success')

    def __init__(self, db_driver=None, image_service=None):
        self.image_service = (image_service or
                              glance.get_default_image_service())
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        self.availability_zones = []
        self.availability_zones_last_fetched = None
        self.key_manager = key_manager.API(CONF)
        super(API, self).__init__(db_driver)

    def list_availability_zones(self, enable_cache=False):
        """Describe the known availability zones

        :retval tuple of dicts, each with a 'name' and 'available' key
        """
        refresh_cache = False
        if enable_cache:
            if self.availability_zones_last_fetched is None:
                refresh_cache = True
            else:
                cache_age = timeutils.delta_seconds(
                    self.availability_zones_last_fetched,
                    timeutils.utcnow())
                if cache_age >= CONF.az_cache_duration:
                    refresh_cache = True
        if refresh_cache or not enable_cache:
            topic = constants.VOLUME_TOPIC
            ctxt = context.get_admin_context()
            services = objects.ServiceList.get_all_by_topic(ctxt, topic)
            az_data = [(s.availability_zone, s.disabled)
                       for s in services]
            disabled_map = {}
            for (az_name, disabled) in az_data:
                tracked_disabled = disabled_map.get(az_name, True)
                disabled_map[az_name] = tracked_disabled and disabled
            azs = [{'name': name, 'available': not disabled}
                   for (name, disabled) in disabled_map.items()]
            if refresh_cache:
                now = timeutils.utcnow()
                self.availability_zones = azs
                self.availability_zones_last_fetched = now
                LOG.debug("Availability zone cache updated, next update will"
                          " occur around %s.", now + datetime.timedelta(
                              seconds=CONF.az_cache_duration))
        else:
            azs = self.availability_zones
        LOG.info("Availability Zones retrieved successfully.")
        return tuple(azs)
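
    # Illustrative only: with a single enabled volume service in the default
    # 'nova' zone, list_availability_zones() would return something like
    #     ({'name': 'nova', 'available': True},)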

    def _retype_is_possible(self, context,
                            source_type, target_type):
        elevated = context.elevated()
        # If encryptions are different, it is not allowed
        # to create volume from source volume or snapshot.
        if volume_types.volume_types_encryption_changed(
                elevated,
                source_type.id if source_type else None,
                target_type.id if target_type else None):
            return False
        services = objects.ServiceList.get_all_by_topic(
            elevated,
            constants.VOLUME_TOPIC,
            disabled=True)
        if len(services.objects) == 1:
            return True

        source_extra_specs = {}
        if source_type:
            with source_type.obj_as_admin():
                source_extra_specs = source_type.extra_specs
        target_extra_specs = {}
        if target_type:
            with target_type.obj_as_admin():
                target_extra_specs = target_type.extra_specs
        if (volume_utils.matching_backend_name(
                source_extra_specs, target_extra_specs)):
            return True
        return False

    def _is_volume_migrating(self, volume):
        # The migration status 'none' means no migration has ever been done
        # before. The migration status 'error' means the previous migration
        # failed. The migration status 'success' means the previous migration
        # succeeded. The migration status 'deleting' means the source volume
        # failed to delete after a migration.
        # All of the statuses above mean the volume is not in the process
        # of a migration.
        return (volume['migration_status'] not in
                self.AVAILABLE_MIGRATION_STATUS)
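
    # For example, a migration_status of 'migrating' (or the temporary
    # 'target:<volume-id>' value used on a migration's destination volume)
    # is treated as an in-progress migration, while None, 'deleting',
    # 'error', and 'success' are not.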

    def create(self, context, size, name, description, snapshot=None,
               image_id=None, volume_type=None, metadata=None,
               availability_zone=None, source_volume=None,
               scheduler_hints=None,
               source_replica=None, consistencygroup=None,
               cgsnapshot=None, multiattach=False, source_cg=None,
               group=None, group_snapshot=None, source_group=None):
        check_policy(context, 'create_from_image' if image_id else 'create')

        # NOTE(jdg): we can have a create without size if we're
        # doing a create from snap or volume.  Currently
        # the taskflow api will handle this and pull in the
        # size from the source.

        # NOTE(jdg): cinderclient sends in a string representation
        # of the size value.  BUT there is a possibility that somebody
        # could call the API directly so the is_int_like check
        # handles both cases (string representation of true float or int).
        if size and (not strutils.is_int_like(size) or int(size) <= 0):
            msg = _('Invalid volume size provided for create request: %s '
                    '(size argument must be an integer (or string '
                    'representation of an integer) and greater '
                    'than zero).') % size
            raise exception.InvalidInput(reason=msg)

        if consistencygroup and (not cgsnapshot and not source_cg):
            if not volume_type:
                msg = _("volume_type must be provided when creating "
                        "a volume in a consistency group.")
                raise exception.InvalidInput(reason=msg)
            cg_voltypeids = consistencygroup.volume_type_id
            if volume_type.id not in cg_voltypeids:
                msg = _("Invalid volume_type provided: %s (requested "
                        "type must be supported by this consistency "
                        "group).") % volume_type
                raise exception.InvalidInput(reason=msg)

        if group and (not group_snapshot and not source_group):
            if not volume_type:
                msg = _("volume_type must be provided when creating "
                        "a volume in a group.")
                raise exception.InvalidInput(reason=msg)
            vol_type_ids = [v_type.id for v_type in group.volume_types]
            if volume_type.id not in vol_type_ids:
                msg = _("Invalid volume_type provided: %s (requested "
                        "type must be supported by this "
                        "group).") % volume_type
                raise exception.InvalidInput(reason=msg)

        if source_volume and volume_type:
            if volume_type.id != source_volume.volume_type_id:
                if not self._retype_is_possible(
                        context,
                        source_volume.volume_type,
                        volume_type):
                    msg = _("Invalid volume_type provided: %s (requested type "
                            "is not compatible; either match source volume, "
                            "or omit type argument).") % volume_type.id
                    raise exception.InvalidInput(reason=msg)

        # When cloning replica (for testing), volume type must be omitted
        if source_replica and volume_type:
            msg = _("No volume_type should be provided when creating test "
                    "replica.")
            raise exception.InvalidInput(reason=msg)

        if snapshot and volume_type:
            if volume_type.id != snapshot.volume_type_id:
                if not self._retype_is_possible(context,
                                                snapshot.volume.volume_type,
                                                volume_type):
                    msg = _("Invalid volume_type provided: %s (requested "
                            "type is not compatible; recommend omitting "
                            "the type argument).") % volume_type.id
                    raise exception.InvalidInput(reason=msg)

        # Determine the valid availability zones that the volume could be
        # created in (a task in the flow will/can use this information to
        # ensure that the availability zone requested is valid).
        raw_zones = self.list_availability_zones(enable_cache=True)
        availability_zones = set([az['name'] for az in raw_zones])
        if CONF.storage_availability_zone:
            availability_zones.add(CONF.storage_availability_zone)

        utils.check_metadata_properties(metadata)

        create_what = {
            'context': context,
            'raw_size': size,
            'name': name,
            'description': description,
            'snapshot': snapshot,
            'image_id': image_id,
            'raw_volume_type': volume_type,
            'metadata': metadata or {},
            'raw_availability_zone': availability_zone,
            'source_volume': source_volume,
            'scheduler_hints': scheduler_hints,
            'key_manager': self.key_manager,
            'source_replica': source_replica,
            'optional_args': {'is_quota_committed': False},
            'consistencygroup': consistencygroup,
            'cgsnapshot': cgsnapshot,
            'multiattach': multiattach,
            'group': group,
            'group_snapshot': group_snapshot,
            'source_group': source_group,
        }
        try:
            sched_rpcapi = (self.scheduler_rpcapi if (
                            not cgsnapshot and not source_cg and
                            not group_snapshot and not source_group)
                            else None)
            volume_rpcapi = (self.volume_rpcapi if (
                             not cgsnapshot and not source_cg and
                             not group_snapshot and not source_group)
                             else None)
            flow_engine = create_volume.get_flow(self.db,
                                                 self.image_service,
                                                 availability_zones,
                                                 create_what,
                                                 sched_rpcapi,
                                                 volume_rpcapi)
        except Exception:
            msg = _('Failed to create api volume flow.')
            LOG.exception(msg)
            raise exception.CinderException(msg)

        # Attaching this listener will capture all of the notifications that
        # taskflow sends out and redirect them to a more useful log for
        # cinder's debugging (or error reporting) usage.
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
            vref = flow_engine.storage.fetch('volume')
            LOG.info("Create volume request issued successfully.",
                     resource=vref)
            return vref
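
    # Illustrative only (the names below are hypothetical): a typical call is
    #     volume_api = API()
    #     vol = volume_api.create(ctx, 10, 'vol-1', 'demo volume',
    #                             volume_type=some_type)
    # where ctx is a request context and the size is given in GiB; the actual
    # creation is driven by the taskflow flow built above.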

    @wrap_check_policy
    def delete(self, context, volume,
               force=False,
               unmanage_only=False,
               cascade=False):
        if context.is_admin and context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id

        if not volume.host:
            volume_utils.notify_about_volume_usage(context,
                                                   volume, "delete.start")
            # NOTE(vish): scheduling failed, so delete it
            # Note(zhiteng): update volume quota reservation
            try:
                reservations = None
                if volume.status != 'error_managing':
                    LOG.debug("Decrease volume quotas only if status is not "
                              "error_managing.")
                    reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                    QUOTAS.add_volume_type_opts(context,
                                                reserve_opts,
                                                volume.volume_type_id)
                    reservations = QUOTAS.reserve(context,
                                                  project_id=project_id,
                                                  **reserve_opts)
            except Exception:
                LOG.exception("Failed to update quota while "
                              "deleting volume.")
            volume.destroy()

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

            volume_utils.notify_about_volume_usage(context,
                                                   volume, "delete.end")
            LOG.info("Delete volume request issued successfully.",
                     resource={'type': 'volume',
                               'id': volume.id})
            return

        if not unmanage_only:
            volume.assert_not_frozen()

        # Build required conditions for conditional update
        expected = {
            'attach_status': db.Not(fields.VolumeAttachStatus.ATTACHED),
            'migration_status': self.AVAILABLE_MIGRATION_STATUS,
            'consistencygroup_id': None,
            'group_id': None}

        # If not force deleting we have status conditions
        if not force:
            expected['status'] = ('available', 'error', 'error_restoring',
                                  'error_extending', 'error_managing')

        if cascade:
            if force:
                # Ignore status checks, but ensure snapshots are not part
                # of a cgsnapshot.
                filters = [~db.volume_has_snapshots_in_a_cgsnapshot_filter()]
            else:
                # Allow deletion if all snapshots are in an expected state
                filters = [~db.volume_has_undeletable_snapshots_filter()]
        else:
            # Don't allow deletion of volume with snapshots
            filters = [~db.volume_has_snapshots_filter()]
        values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()}
        if unmanage_only is True:
            values['status'] = 'unmanaging'
        if volume.status == 'error_managing':
            values['status'] = 'error_managing_deleting'

        result = volume.conditional_update(values, expected, filters)

        if not result:
            status = utils.build_or_str(expected.get('status'),
                                        _('status must be %s and'))
            msg = _('Volume %s must not be migrating, attached, belong to a '
                    'group or have snapshots.') % status
            LOG.info(msg)
            raise exception.InvalidVolume(reason=msg)

        if cascade:
            values = {'status': 'deleting'}
            expected = {'cgsnapshot_id': None,
                        'group_snapshot_id': None}
            if not force:
                expected['status'] = ('available', 'error', 'deleting')

            snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
                context, volume.id)
            for s in snapshots:
                result = s.conditional_update(values, expected, filters)

                if not result:
                    volume.update({'status': 'error_deleting'})
                    volume.save()

                    msg = _('Failed to update snapshot.')
                    raise exception.InvalidVolume(reason=msg)

        cache = image_cache.ImageVolumeCache(self.db, self)
        entry = cache.get_by_image_volume(context, volume.id)
        if entry:
            cache.evict(context, entry)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible
        # process because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            try:
                self.key_manager.delete(context, encryption_key_id)
            except exception.CinderException as e:
                LOG.warning("Unable to delete encryption key for "
                            "volume: %s.", e.msg, resource=volume)
            except Exception:
                LOG.exception("Unable to delete encryption key for "
                              "volume.")

        self.volume_rpcapi.delete_volume(context,
                                         volume,
                                         unmanage_only,
                                         cascade)
        LOG.info("Delete volume request issued successfully.",
                 resource=volume)

    @wrap_check_policy
    def update(self, context, volume, fields):
        # TODO(karthikp): Making sure volume is always oslo-versioned
        # If not we convert it at the start of update method. This check
        # needs to be removed once we have moved to ovo.
        if not isinstance(volume, objects_base.CinderObject):
            vol_obj = objects.Volume()
            volume = objects.Volume._from_db_object(context, vol_obj, volume)

        if volume.status == 'maintenance':
            LOG.info("Unable to update volume, "
                     "because it is in maintenance.", resource=volume)
            msg = _("The volume cannot be updated during maintenance.")
            raise exception.InvalidVolume(reason=msg)

        utils.check_metadata_properties(fields.get('metadata', None))

        volume.update(fields)
        volume.save()
        LOG.info("Volume updated successfully.", resource=volume)

    def get(self, context, volume_id, viewable_admin_meta=False):
        volume = objects.Volume.get_by_id(context, volume_id)

        try:
            check_policy(context, 'get', volume)
        except exception.PolicyNotAuthorized:
            # raise VolumeNotFound to avoid providing info about
            # the existence of an unauthorized volume id
            raise exception.VolumeNotFound(volume_id=volume_id)

        if viewable_admin_meta:
            ctxt = context.elevated()
            admin_metadata = self.db.volume_admin_metadata_get(ctxt,
                                                               volume_id)
            volume.admin_metadata = admin_metadata
            volume.obj_reset_changes()

        LOG.info("Volume info retrieved successfully.", resource=volume)
        return volume

    def get_all(self, context, marker=None, limit=None, sort_keys=None,
                sort_dirs=None, filters=None, viewable_admin_meta=False,
                offset=None):
        check_policy(context, 'get_all')

        if filters is None:
            filters = {}

        allTenants = utils.get_bool_param('all_tenants', filters)

        try:
            if limit is not None:
                limit = int(limit)
                if limit < 0:
                    msg = _('limit param must be positive')
                    raise exception.InvalidInput(reason=msg)
        except ValueError:
            msg = _('limit param must be an integer')
            raise exception.InvalidInput(reason=msg)

        # Non-admin shouldn't see temporary target of a volume migration, add
        # unique filter data to reflect that only volumes with a NULL
        # 'migration_status' or a 'migration_status' that does not start with
        # 'target:' should be returned (processed in db/sqlalchemy/api.py)
        if not context.is_admin:
            filters['no_migration_targets'] = True

        if filters:
            LOG.debug("Searching by: %s.", six.text_type(filters))

        if context.is_admin and allTenants:
            # Need to remove all_tenants to pass the filtering below.
            del filters['all_tenants']
            volumes = objects.VolumeList.get_all(context, marker, limit,
                                                 sort_keys=sort_keys,
                                                 sort_dirs=sort_dirs,
                                                 filters=filters,
                                                 offset=offset)
        else:
            if viewable_admin_meta:
                context = context.elevated()
            volumes = objects.VolumeList.get_all_by_project(
                context, context.project_id, marker, limit,
                sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
                offset=offset)

        LOG.info("Get all volumes completed successfully.")
        return volumes

    def get_volume_summary(self, context, filters=None):
        check_policy(context, 'get_all')

        if filters is None:
            filters = {}

        all_tenants = utils.get_bool_param('all_tenants', filters)
        filters.pop('all_tenants', None)
        project_only = not (all_tenants and context.is_admin)
        volumes = objects.VolumeList.get_volume_summary(context, project_only)

        LOG.info("Get summary completed successfully.")
        return volumes

    def get_snapshot(self, context, snapshot_id):
        check_policy(context, 'get_snapshot')
        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)

        # FIXME(jdg): The objects don't have the db name entries
        # so build the resource tag manually for now.
        LOG.info("Snapshot retrieved successfully.",
                 resource={'type': 'snapshot',
                           'id': snapshot.id})
        return snapshot

    def get_volume(self, context, volume_id):
        check_policy(context, 'get_volume')
        volume = objects.Volume.get_by_id(context, volume_id)
        LOG.info("Volume retrieved successfully.", resource=volume)
        return volume

    def get_all_snapshots(self, context, search_opts=None, marker=None,
                          limit=None, sort_keys=None, sort_dirs=None,
                          offset=None):
        check_policy(context, 'get_all_snapshots')

        search_opts = search_opts or {}

        if context.is_admin and 'all_tenants' in search_opts:
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            snapshots = objects.SnapshotList.get_all(
                context, search_opts, marker, limit, sort_keys, sort_dirs,
                offset)
        else:
            snapshots = objects.SnapshotList.get_all_by_project(
                context, context.project_id, search_opts, marker, limit,
                sort_keys, sort_dirs, offset)

        LOG.info("Get all snapshots completed successfully.")
        return snapshots

    @wrap_check_policy
    def reserve_volume(self, context, volume):
        expected = {'multiattach': volume.multiattach,
                    'status': (('available', 'in-use') if volume.multiattach
                               else 'available')}

        result = volume.conditional_update({'status': 'attaching'}, expected)

        if not result:
            expected_status = utils.build_or_str(expected['status'])
            msg = _('Volume status must be %(expected)s to reserve, but the '
                    'status is %(current)s.') % {'expected': expected_status,
                                                 'current': volume.status}
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        LOG.info("Reserve volume completed successfully.",
                 resource=volume)

    @wrap_check_policy
    def unreserve_volume(self, context, volume):
        expected = {'status': 'attaching'}
        # Status change depends on whether it has attachments (in-use) or not
        # (available)
        value = {'status': db.Case([(db.volume_has_attachments_filter(),
                                     'in-use')],
                                   else_='available')}
        result = volume.conditional_update(value, expected)
        if not result:
            LOG.debug("Attempted to unreserve volume that was not "
                      "reserved, nothing to do.",
                      resource=volume)
            return

        LOG.info("Unreserve volume completed successfully.",
                 resource=volume)

    @wrap_check_policy
    def begin_detaching(self, context, volume):
        # If we are in the middle of a volume migration, we don't want the
        # user to see that the volume is 'detaching'. Having
        # 'migration_status' set will have the same effect internally.
        expected = {'status': 'in-use',
                    'attach_status': fields.VolumeAttachStatus.ATTACHED,
                    'migration_status': self.AVAILABLE_MIGRATION_STATUS}

        result = volume.conditional_update({'status': 'detaching'}, expected)

        if not (result or self._is_volume_migrating(volume)):
            msg = _("Unable to detach volume. Volume status must be 'in-use' "
                    "and attach_status must be 'attached' to detach.")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        LOG.info("Begin detaching volume completed successfully.",
                 resource=volume)

    @wrap_check_policy
    def roll_detaching(self, context, volume):
        volume.conditional_update({'status': 'in-use'},
                                  {'status': 'detaching'})
        LOG.info("Roll detaching of volume completed successfully.",
                 resource=volume)

    @wrap_check_policy
    def attach(self, context, volume, instance_uuid, host_name,
               mountpoint, mode):
        if volume.status == 'maintenance':
            LOG.info('Unable to attach volume, '
                     'because it is in maintenance.', resource=volume)
            msg = _("The volume cannot be attached in maintenance mode.")
            raise exception.InvalidVolume(reason=msg)

        # We add readonly metadata if it doesn't already exist
        readonly = self.update_volume_admin_metadata(context.elevated(),
                                                     volume,
                                                     {'readonly': 'False'},
                                                     update=False)['readonly']
        if readonly == 'True' and mode != 'ro':
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume.id)

        attach_results = self.volume_rpcapi.attach_volume(context,
                                                          volume,
                                                          instance_uuid,
                                                          host_name,
                                                          mountpoint,
                                                          mode)
        LOG.info("Attach volume completed successfully.",
                 resource=volume)
        return attach_results

    @wrap_check_policy
    def detach(self, context, volume, attachment_id):
        if volume['status'] == 'maintenance':
            LOG.info('Unable to detach volume, '
                     'because it is in maintenance.', resource=volume)
            msg = _("The volume cannot be detached in maintenance mode.")
            raise exception.InvalidVolume(reason=msg)
        detach_results = self.volume_rpcapi.detach_volume(context, volume,
                                                          attachment_id)
        LOG.info("Detach volume completed successfully.",
                 resource=volume)
        return detach_results

    @wrap_check_policy
    def initialize_connection(self, context, volume, connector):
        if volume.status == 'maintenance':
            LOG.info('Unable to initialize the connection for '
                     'volume, because it is in '
                     'maintenance.', resource=volume)
            msg = _("The volume connection cannot be initialized in "
                    "maintenance mode.")
            raise exception.InvalidVolume(reason=msg)
        init_results = self.volume_rpcapi.initialize_connection(context,
                                                                volume,
                                                                connector)
        LOG.info("Initialize volume connection completed successfully.",
                 resource=volume)
        return init_results

    @wrap_check_policy
    def terminate_connection(self, context, volume, connector, force=False):
        self.volume_rpcapi.terminate_connection(context,
                                                volume,
                                                connector,
                                                force)
        LOG.info("Terminate volume connection completed successfully.",
                 resource=volume)
        self.unreserve_volume(context, volume)

    @wrap_check_policy
    def accept_transfer(self, context, volume, new_user, new_project):
        if volume['status'] == 'maintenance':
            LOG.info('Unable to accept transfer for volume, '
                     'because it is in maintenance.', resource=volume)
            msg = _("The volume cannot accept transfer in maintenance mode.")
            raise exception.InvalidVolume(reason=msg)
        results = self.volume_rpcapi.accept_transfer(context,
                                                     volume,
                                                     new_user,
                                                     new_project)
        LOG.info("Transfer volume completed successfully.",
                 resource=volume)
        return results

    def _create_snapshot(self, context,
                         volume, name, description,
                         force=False, metadata=None,
                         cgsnapshot_id=None,
                         group_snapshot_id=None):
        volume.assert_not_frozen()
        snapshot = self.create_snapshot_in_db(
            context, volume, name,
            description, force, metadata, cgsnapshot_id,
            True, group_snapshot_id)
        self.volume_rpcapi.create_snapshot(context, volume, snapshot)

        return snapshot

    def create_snapshot_in_db(self, context,
                              volume, name, description,
                              force, metadata,
                              cgsnapshot_id,
                              commit_quota=True,
                              group_snapshot_id=None):
        check_policy(context, 'create_snapshot', volume)

        if not volume.host:
            msg = _("The snapshot cannot be created because volume has "
                    "not been scheduled to any host.")
            raise exception.InvalidVolume(reason=msg)

        if volume['status'] == 'maintenance':
            LOG.info('Unable to create the snapshot for volume, '
                     'because it is in maintenance.', resource=volume)
            msg = _("The snapshot cannot be created when the volume is in "
                    "maintenance mode.")
            raise exception.InvalidVolume(reason=msg)
        if self._is_volume_migrating(volume):
            # Volume is migrating, wait until done
            msg = _("Snapshot cannot be created while volume is migrating.")
            raise exception.InvalidVolume(reason=msg)

        if volume['status'].startswith('replica_'):
            # Can't snapshot secondary replica
            msg = _("Snapshot of secondary replica is not allowed.")
            raise exception.InvalidVolume(reason=msg)

        if ((not force) and (volume['status'] != "available")):
            msg = _("Volume %(vol_id)s status must be available, "
                    "but current status is: "
                    "%(vol_status)s.") % {'vol_id': volume['id'],
                                          'vol_status': volume['status']}
            raise exception.InvalidVolume(reason=msg)

        if commit_quota:
            try:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': 1}
                else:
                    reserve_opts = {'snapshots': 1,
                                    'gigabytes': volume['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.get('volume_type_id'))
                reservations = QUOTAS.reserve(context, **reserve_opts)
            except exception.OverQuota as e:
                quota_utils.process_reserve_over_quota(
                    context, e,
                    resource='snapshots',
                    size=volume.size)

        utils.check_metadata_properties(metadata)

        snapshot = None
        try:
            kwargs = {
                'volume_id': volume['id'],
                'cgsnapshot_id': cgsnapshot_id,
                'group_snapshot_id': group_snapshot_id,
                'user_id': context.user_id,
                'project_id': context.project_id,
                'status': fields.SnapshotStatus.CREATING,
                'progress': '0%',
                'volume_size': volume['size'],
                'display_name': name,
                'display_description': description,
                'volume_type_id': volume['volume_type_id'],
                'encryption_key_id': volume['encryption_key_id'],
                'metadata': metadata or {}
            }
            snapshot = objects.Snapshot(context=context, **kwargs)
            snapshot.create()

            if commit_quota:
                QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if snapshot.obj_attr_is_set('id'):
                        snapshot.destroy()
                finally:
                    if commit_quota:
                        QUOTAS.rollback(context, reservations)

        return snapshot

    def create_snapshots_in_db(self, context,
                               volume_list,
                               name, description,
                               cgsnapshot_id,
                               group_snapshot_id=None):
        snapshot_list = []
        for volume in volume_list:
            self._create_snapshot_in_db_validate(context, volume)

        reservations = self._create_snapshots_in_db_reserve(
            context, volume_list)

        options_list = []
        for volume in volume_list:
            options = self._create_snapshot_in_db_options(
                context, volume, name, description, cgsnapshot_id,
                group_snapshot_id)
            options_list.append(options)

        try:
            for options in options_list:
                snapshot = objects.Snapshot(context=context, **options)
                snapshot.create()
                snapshot_list.append(snapshot)

            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for snap in snapshot_list:
                        snap.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        return snapshot_list

    def _create_snapshot_in_db_validate(self, context, volume):
        check_policy(context, 'create_snapshot', volume)

        if volume['status'] == 'maintenance':
            LOG.info('Unable to create the snapshot for volume, '
                     'because it is in maintenance.', resource=volume)
            msg = _("The snapshot cannot be created when the volume is in "
                    "maintenance mode.")
            raise exception.InvalidVolume(reason=msg)
        if self._is_volume_migrating(volume):
            # Volume is migrating, wait until done
            msg = _("Snapshot cannot be created while volume is migrating.")
            raise exception.InvalidVolume(reason=msg)
        if volume['status'] == 'error':
            msg = _("The snapshot cannot be created when the volume is "
                    "in error status.")
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

    def _create_snapshots_in_db_reserve(self, context, volume_list):
        reserve_opts_list = []
        total_reserve_opts = {}
        try:
            for volume in volume_list:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': 1}
                else:
                    reserve_opts = {'snapshots': 1,
                                    'gigabytes': volume['size']}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.get('volume_type_id'))
                reserve_opts_list.append(reserve_opts)

            for reserve_opts in reserve_opts_list:
                for (key, value) in reserve_opts.items():
                    if key not in total_reserve_opts.keys():
                        total_reserve_opts[key] = value
                    else:
                        total_reserve_opts[key] = \
                            total_reserve_opts[key] + value
            reservations = QUOTAS.reserve(context, **total_reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context, e,
                                                   resource='snapshots',
                                                   size=volume.size)

        return reservations

    def _create_snapshot_in_db_options(self, context, volume,
                                       name, description,
                                       cgsnapshot_id,
                                       group_snapshot_id=None):
        options = {'volume_id': volume['id'],
                   'cgsnapshot_id': cgsnapshot_id,
                   'group_snapshot_id': group_snapshot_id,
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': fields.SnapshotStatus.CREATING,
                   'progress': '0%',
                   'volume_size': volume['size'],
                   'display_name': name,
                   'display_description': description,
                   'volume_type_id': volume['volume_type_id'],
                   'encryption_key_id': volume['encryption_key_id']}
        return options

    def create_snapshot(self, context,
                        volume, name, description,
                        metadata=None, cgsnapshot_id=None,
                        group_snapshot_id=None):
        result = self._create_snapshot(context, volume, name, description,
                                       False, metadata, cgsnapshot_id,
                                       group_snapshot_id)
        LOG.info("Snapshot create request issued successfully.",
                 resource=result)
        return result

    def create_snapshot_force(self, context,
                              volume, name,
                              description, metadata=None):
        result = self._create_snapshot(context, volume, name, description,
                                       True, metadata)
        LOG.info("Snapshot force create request issued successfully.",
                 resource=result)
        return result

    @wrap_check_policy
    def delete_snapshot(self, context, snapshot, force=False,
                        unmanage_only=False):
        if not unmanage_only: