# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes, such as VDIs and VIFs, as well as their lookup functions.
"""
import contextlib
import cPickle as pickle
import decimal
import os
import re
import time
import urllib
import urlparse
import uuid
from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova import db
from nova import exception
from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * 1024 * 1024,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='other-config:i18n-key=local-storage',
help='Filter for finding the SR to be used to install guest '
'instances on. The default value is the Local Storage in '
'default XenServer/XCP installations. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('xenapi_sparse_copy',
default=True,
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('xenapi_num_vbd_unplug_retries',
default=10,
help='Maximum number of retries to unplug VBD'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_vm_utils_opts)
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
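# Disk layout constants: classic MBR partitioning reserves the first 63
# 512-byte sectors ahead of the first partition; MBR_SIZE_SECTORS and
# MBR_SIZE_BYTES below reflect that convention.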
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
MAX_VDI_CHAIN_SIZE = 16
class ImageType(object):
"""Enumeration class for distinguishing different image types
| 0 - kernel image (goes on dom0's filesystem)
| 1 - ramdisk image (goes on dom0's filesystem)
| 2 - disk image (local SR, partitioned by objectstore plugin)
| 3 - raw disk image (local SR, NOT partitioned by plugin)
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
"""
KERNEL = 0
RAMDISK = 1
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "root"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR)
@classmethod
def to_string(cls, image_type):
return dict(zip(ImageType._ids, ImageType._strs)).get(image_type)
@classmethod
def from_string(cls, image_type_str):
return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str)
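# Illustrative round-trip of the ImageType helpers:
#   ImageType.to_string(ImageType.DISK_VHD)  -> 'vhd'
#   ImageType.from_string('vhd')             -> 4 (ImageType.DISK_VHD)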
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False):
"""Create a VM record. Returns new VM reference.
    The use_pv_kernel flag indicates whether the guest is HVM or PV.
    There are three scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
inst_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(inst_type_id)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
'other_config': {'allowvssprovider': str(False),
'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': {},
'xenstore_data': {}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance['kernel_id']:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
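        # NOTE: boot order 'dc' asks the BIOS to try the CD drive ('d')
        # before the first hard disk ('c').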
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
vm_ref = session.call_xenapi('VM.create', rec)
LOG.debug(_('Created VM'), instance=instance)
return vm_ref
def destroy_vm(session, instance, vm_ref):
"""Destroys a VM record."""
try:
session.call_xenapi('VM.destroy', vm_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
return
LOG.debug(_("VM destroyed"), instance=instance)
def shutdown_vm(session, instance, vm_ref, hard=True):
vm_rec = session.call_xenapi("VM.get_record", vm_ref)
state = compile_info(vm_rec)['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
return
LOG.debug(_("Shutting down VM"), instance=instance)
try:
if hard:
session.call_xenapi('VM.hard_shutdown', vm_ref)
else:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
def ensure_free_mem(session, instance):
inst_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(inst_type_id)
mem = long(instance_type['memory_mb']) * 1024 * 1024
host = session.get_xenapi_host()
host_free_mem = long(session.call_xenapi("host.compute_free_memory",
host))
return host_free_mem >= mem
def find_vbd_by_number(session, vm_ref, number):
"""Get the VBD reference from the device number"""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vbd_rec = session.call_xenapi("VBD.get_record", vbd_ref)
if vbd_rec['userdevice'] == str(number):
return vbd_ref
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('VBD not found in instance %s') % vm_ref)
def unplug_vbd(session, vbd_ref):
"""Unplug VBD from VM"""
# Call VBD.unplug on the given VBD, with a retry if we get
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
max_attempts = FLAGS.xenapi_num_vbd_unplug_retries + 1
for num_attempt in xrange(1, max_attempts + 1):
try:
session.call_xenapi('VBD.unplug', vbd_ref)
return
except session.XenAPI.Failure, exc:
            err = exc.details[0] if exc.details else None
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_('VBD %s already detached'), vbd_ref)
return
elif err == 'DEVICE_DETACH_REJECTED':
LOG.info(_('VBD %(vbd_ref)s detach rejected, attempt'
' %(num_attempt)d/%(max_attempts)d'), locals())
else:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to unplug VBD %s') % vbd_ref)
greenthread.sleep(1)
raise volume_utils.StorageError(
_('Reached maximum number of retries trying to unplug VBD %s')
% vbd_ref)
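# With the default xenapi_num_vbd_unplug_retries of 10, unplug_vbd makes
# eleven one-second-spaced attempts, so a busy device gets roughly ten
# seconds to quiesce before StorageError is raised.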
def destroy_vbd(session, vbd_ref):
"""Destroy VBD from host database"""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
read_only=False, bootable=False):
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
    vbd_rec['mode'] = 'RO' if read_only else 'RW'
vbd_rec['type'] = vbd_type
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... '), locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.'), locals())
return vbd_ref
def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to destroy VDI %s') % vdi_ref)
def safe_destroy_vdis(session, vdi_refs):
"""Destroys the requested VDIs, logging any StorageError exceptions."""
for vdi_ref in vdi_refs:
try:
destroy_vdi(session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
    # create_vdi may also be called while creating a volume, so instance
    # information may or may not be present
otherconf = {'nova_disk_type': disk_type}
if instance:
otherconf['nova_instance_uuid'] = instance['uuid']
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': otherconf,
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'),
locals())
return vdi_ref
def get_vdis_for_boot_from_vol(session, instance, dev_params):
vdis = {}
sr_uuid = dev_params['sr_uuid']
sr_ref = volume_utils.find_sr_by_uuid(session,
sr_uuid)
if sr_ref:
session.call_xenapi("SR.scan", sr_ref)
return {'root': dict(uuid=dev_params['vdi_uuid'],
file=None)}
return vdis
def _volume_in_mapping(mount_device, block_device_info):
block_device_list = [block_device.strip_prefix(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_dev = swap['device_name']
block_device_list.append(block_device.strip_prefix(swap_dev))
block_device_list += [block_device.strip_prefix(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
return block_device.strip_prefix(mount_device) in block_device_list
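# Illustrative example (device names hypothetical): if the mapping holds a
# volume at '/dev/xvdb', _volume_in_mapping('/dev/xvdb', info) returns True;
# block_device.strip_prefix() reduces both sides to the bare device name so
# the comparison ignores any '/dev/' prefix.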
def get_vdis_for_instance(context, session, instance, name_label, image,
image_type, block_device_info=None):
if block_device_info:
LOG.debug(_("block device info: %s"), block_device_info)
rootdev = block_device_info['root_device_name']
if _volume_in_mapping(rootdev, block_device_info):
            # Boot from volume: return the VDI described by the block
            # device's connection info instead of creating one from an
            # image.
bdm_root_dev = block_device_info['block_device_mapping'][0]
dev_params = bdm_root_dev['connection_info']['data']
LOG.debug(dev_params)
return get_vdis_for_boot_from_vol(session,
instance,
dev_params)
return _create_image(context, session, instance, name_label, image,
image_type)
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
"""This creates a temporary VM so that we can snapshot a VDI.
    VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
work around this, we need to create a temporary VM and then map the VDI to
the VM using a temporary VBD.
"""
name_label = "dummy"
vm_ref = create_vm(session, instance, name_label, None, None)
try:
vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
read_only=True)
try:
yield vm_ref
finally:
try:
destroy_vbd(session, vbd_ref)
except volume_utils.StorageError:
# destroy_vbd() will log error
pass
finally:
destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference.
This function differs from the XenAPI `VDI.copy` call in that the copy is
atomic and isolated, meaning we don't see half-downloaded images. It
    accomplishes this by copying the VDIs into a temporary directory and then
atomically renaming them into the SR when the copy is completed.
The correct long term solution is to fix `VDI.copy` so that it is atomic
and isolated.
"""
with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
label = "snapshot"
with snapshot_attached_here(
session, instance, vm_ref, label) as vdi_uuids:
params = {'sr_path': get_sr_path(session),
'vdi_uuids': vdi_uuids,
'uuid_stack': _make_uuid_stack()}
kwargs = {'params': pickle.dumps(params)}
result = session.call_plugin(
'workarounds', 'safe_copy_vdis', kwargs)
imported_vhds = jsonutils.loads(result)
root_uuid = imported_vhds['root']['uuid']
# TODO(sirp): for safety, we should probably re-scan the SR after every
# call to a dom0 plugin, since there is a possibility that the underlying
# VHDs changed
scan_default_sr(session)
vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s') % locals())
return vdi_ref
def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
vdi_ref = vdi_ref or session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
session.call_xenapi('VDI.set_name_label', vdi_ref, label)
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
def get_vdi_for_vm_safely(session, vm_ref):
"""Retrieves the primary VDI for a VM"""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd in vbd_refs:
vbd_rec = session.call_xenapi("VBD.get_record", vbd)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == '0':
vdi_rec = session.call_xenapi("VDI.get_record", vbd_rec['VDI'])
return vbd_rec['VDI'], vdi_rec
raise exception.NovaException(_("No primary VDI found for %(vm_ref)s")
% locals())
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label):
LOG.debug(_("Starting snapshot for VM"), instance=instance)
# Memorize the original_parent_uuid so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
template_vm_ref, template_vdi_uuid = _create_snapshot(
session, instance, vm_ref, label)
try:
sr_ref = vm_vdi_rec["SR"]
parent_uuid, base_uuid = _wait_for_vhd_coalesce(
session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
_walk_vdi_chain(session, template_vdi_uuid)]
yield vdi_uuids
finally:
_destroy_snapshot(session, instance, template_vm_ref)
def _create_snapshot(session, instance, vm_ref, label):
template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_("Created snapshot %(template_vdi_uuid)s with label"
" '%(label)s'"), locals(), instance=instance)
return template_vm_ref, template_vdi_uuid
def _destroy_snapshot(session, instance, vm_ref):
vdi_refs = lookup_vm_vdis(session, vm_ref)
safe_destroy_vdis(session, vdi_refs)
destroy_vm(session, instance, vm_ref)
def get_sr_path(session):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
sr_ref = safe_find_sr(session)
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
"""Destroy used or unused cached images.
A cached image that is being used by at least one VM is said to be 'used'.
In the case of an 'unused' image, the cached image will be the only
    descendant of the base-copy. So when we delete the cached image, the
refcount will drop to zero and XenServer will automatically destroy the
base-copy for us.
The default behavior of this function is to destroy only 'unused' cached
images. To destroy all cached images, use the `all_cached=True` kwarg.
"""
cached_images = _find_cached_images(session, sr_ref)
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'"))
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
for vdi_ref in cached_images.values():
vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
if all_cached:
destroy_cached_vdi(vdi_uuid, vdi_ref)
continue
# Unused-Only: Search for siblings
# Chain length greater than two implies a VM must be holding a ref to
# the base-copy (otherwise it would have coalesced), so consider this
# cached image used.
chain = list(_walk_vdi_chain(session, vdi_uuid))
if len(chain) > 2:
continue
elif len(chain) == 2:
# Siblings imply cached image is used
root_vdi_rec = chain[-1]
children = _child_vhds(session, sr_ref, root_vdi_rec['uuid'])
if len(children) > 1:
continue
destroy_cached_vdi(vdi_uuid, vdi_ref)
return destroyed
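# Chain-length intuition for destroy_cached_images: a chain of
# [cached-image, base-copy] (length 2) whose base-copy has no other
# children means the cached image is 'unused' and safe to destroy.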
def _find_cached_images(session, sr_ref):
"""Return a dict(uuid=vdi_ref) representing all cached images."""
cached_images = {}
for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
image_id = vdi_rec['other_config']['image-id']
except KeyError:
continue
cached_images[image_id] = vdi_ref
return cached_images
def _find_cached_image(session, image_id, sr_ref):
"""Returns the vdi-ref of the cached image."""
cached_images = _find_cached_images(session, sr_ref)
return cached_images.get(image_id)
def upload_image(context, session, instance, vdi_uuids, image_id):
"""Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance as the image with the specified ID.
"""
# NOTE(sirp): Currently we only support uploading images as VHD, there
# is no RAW equivalent (yet)
LOG.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s"), locals(), instance=instance)
glance_api_servers = glance.get_api_servers()
glance_host, glance_port, glance_use_ssl = glance_api_servers.next()
# TODO(sirp): this inherit-image-property code should probably go in
# nova/compute/manager so it can be shared across hypervisors
sys_meta = db.instance_system_metadata_get(context, instance['uuid'])
properties = {}
prefix = 'image_'
for key, value in sys_meta.iteritems():
if key.startswith(prefix):
key = key[len(prefix):]
if key in FLAGS.non_inheritable_image_properties:
continue
properties[key] = value
properties['auto_disk_config'] = instance['auto_disk_config']
properties['os_type'] = instance['os_type'] or FLAGS.default_os_type
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': glance_host,
'glance_port': glance_port,
'glance_use_ssl': glance_use_ssl,
'sr_path': get_sr_path(session),
'auth_token': getattr(context, 'auth_token', None),
'properties': properties}
kwargs = {'params': pickle.dumps(params)}
session.call_plugin('glance', 'upload_vhd', kwargs)
def resize_disk(session, instance, vdi_ref, instance_type):
# Copy VDI over to something we can resize
# NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
sr_ref = safe_find_sr(session)
copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
try:
# Resize partition and filesystem down
auto_configure_disk(session, copy_ref, instance_type['root_gb'])
# Create new VDI
vdi_size = instance_type['root_gb'] * 1024 * 1024 * 1024
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
'root', vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = instance_type['root_gb'] * 1024 * 1024 * 1024
_copy_partition(session, copy_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
finally:
destroy_vdi(session, copy_ref)
def auto_configure_disk(session, vdi_ref, new_gb):
"""Partition and resize FS to match the size specified by
instance_types.root_gb.
This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as auto_disk_config=True.
The criteria for allowing resize are:
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
"""
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
return
_num, start, old_sectors, ptype = partitions[0]
if ptype in ('ext3', 'ext4'):
new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors)
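# Worked example for the sector math above: resizing to new_gb=10 gives
# new_sectors = 10 * 1024**3 / 512 = 20971520 sectors.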
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type):
"""
Steps to programmatically generate a disk:
1. Create VDI of desired size
2. Attach VDI to compute worker
3. Create partition
4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = 1024 * 1024
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
try:
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create partition
dev_path = utils.make_dev_path(dev)
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True)
partition_start = 0
partition_end = size_mb
utils.execute('parted', '--script', dev_path,
'mkpart', 'primary',
str(partition_start),
str(partition_end),
run_as_root=True)
partition_path = utils.make_dev_path(dev, partition=1)
if fs_type == 'linux-swap':
utils.execute('mkswap', partition_path, run_as_root=True)
elif fs_type is not None:
utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
        # 4. Create VBD between instance VM and the new VDI
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
destroy_vdi(session, vdi_ref)
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
fs_type = "vfat" if is_windows else "linux-swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'ephemeral', size_gb * 1024,
FLAGS.default_ephemeral_format)
def create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
If the image is not present in the cache, it streams it from glance.
    Returns: A dict of the form {vdi_type: {'uuid': ..., 'file': ...}}
"""
filename = ""
if FLAGS.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
if filename == "":
return _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
else:
vdi_type = ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
def destroy_kernel_ramdisk(session, kernel, ramdisk):
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
def _create_cached_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
vdis = {}
if FLAGS.use_cow_images and sr_type != "ext":
LOG.warning(_("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %(sr_type)s. Ignoring the cow flag.")
% locals())
root_vdi_ref = _find_cached_image(session, image_id, sr_ref)
if root_vdi_ref is None:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
root_vdi = vdis['root']
root_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
root_vdi['uuid'])
set_vdi_name(session, root_vdi['uuid'], 'Glance Image %s' % image_id,
'root', vdi_ref=root_vdi_ref)
session.call_xenapi('VDI.add_to_other_config',
root_vdi_ref, 'image-id', str(image_id))
swap_vdi = vdis.get('swap')
if swap_vdi:
session.call_xenapi(
'VDI.add_to_other_config', root_vdi_ref, 'swap-disk',
str(swap_vdi['uuid']))
if FLAGS.use_cow_images and sr_type == 'ext':
new_vdi_ref = _clone_vdi(session, root_vdi_ref)
else:
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)
# Set the name label for the image we just created and remove image id
# field from other-config.
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
vdi_type = ("root" if image_type == ImageType.DISK_VHD
else ImageType.to_string(image_type))
vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
# Create a swap disk if the glance image had one associated with it.
vdi_rec = session.call_xenapi('VDI.get_record', root_vdi_ref)
if 'swap-disk' in vdi_rec['other_config']:
swap_disk_uuid = vdi_rec['other_config']['swap-disk']
swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
swap_disk_uuid)
new_swap_vdi_ref = _safe_copy_vdi(
session, sr_ref, instance, swap_vdi_ref)
new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
new_swap_vdi_ref)
vdis['swap'] = dict(uuid=new_swap_vdi_uuid, file=None)
return vdis
def _create_image(context, session, instance, name_label, image_id,
image_type):
"""Creates VDI from the image stored in the local cache. If the image
is not present in the cache, it streams it from glance.
    Returns: A dict of dicts, keyed by disk type, that describe the VDIs
"""
cache_images = FLAGS.cache_images.lower()
    # Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
cache = False
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
# FIXME(sirp): This should be eager loaded like instance metadata
sys_meta = db.instance_system_metadata_get(context,
instance['uuid'])
try:
cache = utils.bool_from_str(sys_meta['image_cache_in_nova'])
except KeyError:
cache = False
elif cache_images == 'none':
cache = False
else:
LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
" True"), FLAGS.cache_images)
cache = True
# Fetch (and cache) the image
if cache:
vdis = _create_cached_image(context, session, instance, name_label,
image_id, image_type)
else:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
# Set the name label and description to easily identify what
# instance and disk it's for
for vdi_type, vdi in vdis.iteritems():
set_vdi_name(session, vdi['uuid'], name_label, vdi_type)
return vdis
def _fetch_image(context, session, instance, name_label, image_id, image_type):
"""Fetch image from glance based on image type.
    Returns: A dict of dicts, keyed by disk type, that describe the
             fetched VDIs
"""
if image_type == ImageType.DISK_VHD:
vdis = _fetch_vhd_image(context, session, instance, image_id)
else:
vdis = _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in vdis.iteritems():
vdi_uuid = vdi['uuid']
LOG.debug(_("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'"),
locals(), instance=instance)
return vdis
def _fetch_using_dom0_plugin_with_retry(context, session, image_id,
plugin_name, params, callback=None):
max_attempts = FLAGS.glance_num_retries + 1
sleep_time = 0.5
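    # Retries back off exponentially: 0.5s, 1s, 2s, 4s, ... capped at 15s.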
for attempt_num in xrange(1, max_attempts + 1):
LOG.info(_('download_vhd %(image_id)s, '
'attempt %(attempt_num)d/%(max_attempts)d, '
'params: %(params)s') % locals())
try:
if callback:
callback(params)
kwargs = {'params': pickle.dumps(params)}
result = session.call_plugin(plugin_name, 'download_vhd', kwargs)
return jsonutils.loads(result)
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'RetryableError':
LOG.error(_('download_vhd failed: %r') %
(exc.details[3:],))
else:
raise
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
raise exception.CouldNotFetchImage(image_id=image_id)
def _make_uuid_stack():
# NOTE(sirp): The XenAPI plugins run under Python 2.4
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR