-
Notifications
You must be signed in to change notification settings - Fork 237
/
libvirt.py
3279 lines (2960 loc) · 128 KB
/
libvirt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
High-level libvirt test utility functions.
This module is meant to reduce code size by performing common test procedures.
Generally, code here should look like test code.
More specifically:
- Functions in this module should raise exceptions if things go wrong
- Functions in this module typically use functions and classes from
lower-level modules (e.g. utils_misc, qemu_vm, aexpect).
- Functions in this module should not be used by lower-level linux_modules.
- Functions in this module should be used in the right context.
For example, a function should not be used where it may display
misleading or inaccurate info or debug messages.
:copyright: 2014 Red Hat Inc.
"""
import re
import os
import ast
import logging
import shutil
import threading
import time
import sys
import aexpect
from avocado.core import exceptions
from avocado.utils import path as utils_path
from avocado.utils import process
from avocado.utils import stacktrace
from avocado.utils import linux_modules
from avocado.utils import distro
from .. import virsh
from .. import xml_utils
from .. import iscsi
from .. import nfs
from .. import data_dir
from .. import utils_misc
from .. import utils_selinux
from .. import libvirt_storage
from .. import utils_net
from .. import gluster
from .. import remote
from .. import test_setup
from ..utils_iptables import Iptables
from ..staging import lv_utils
from ..utils_libvirtd import service_libvirtd_control
from ..libvirt_xml import vm_xml
from ..libvirt_xml import network_xml
from ..libvirt_xml import xcepts
from ..libvirt_xml import NetworkXML
from ..libvirt_xml import IPXML
from ..libvirt_xml import pool_xml
from ..libvirt_xml import nwfilter_xml
from ..libvirt_xml import vol_xml
from ..libvirt_xml import secret_xml
from ..libvirt_xml.devices import disk
from ..libvirt_xml.devices import hostdev
from ..libvirt_xml.devices import controller
from ..libvirt_xml.devices import seclabel
from ..libvirt_xml.devices import channel
ping = utils_net.ping
class LibvirtNetwork(object):

    """
    Helper that builds and brings up a short-lived libvirt network
    (vnet, macvtap or bridge flavored) for a test, and tears it down
    again via :meth:`cleanup`.
    """

    def create_vnet_xml(self):
        """
        Build the XML for a virtual (NAT) network.

        :return: tuple of (network address, NetworkXML object)
        :raise: TestError when no 'address' kwarg was supplied
        """
        address = self.kwargs.get('address')
        if not address:
            raise exceptions.TestError('Create vnet need address be set')
        net_xml = NetworkXML()
        net_xml.name = self.name
        ip_xml = IPXML(address=address)
        dhcp_start = self.kwargs.get('dhcp_start')
        dhcp_end = self.kwargs.get('dhcp_end')
        # Only configure DHCP when both ends of the range are given.
        if dhcp_start and dhcp_end:
            ip_xml.dhcp_ranges = {'start': dhcp_start, 'end': dhcp_end}
        net_xml.ip = ip_xml
        return address, net_xml

    def create_macvtap_xml(self):
        """
        Build the XML for a macvtap network on a host interface.

        :return: tuple of (interface IP, NetworkXML object)
        :raise: TestError when no 'iface' kwarg was supplied
        """
        iface = self.kwargs.get('iface')
        if not iface:
            raise exceptions.TestError('Create macvtap need iface be set')
        net_xml = NetworkXML()
        net_xml.name = self.name
        net_xml.forward = {'mode': 'bridge', 'dev': iface}
        return utils_net.get_ip_address_by_interface(iface), net_xml

    def create_bridge_xml(self):
        """
        Build the XML for a network on an existing host bridge.

        :return: tuple of (bridge IP, NetworkXML object)
        :raise: TestError when no 'iface' kwarg was supplied
        """
        iface = self.kwargs.get('iface')
        if not iface:
            raise exceptions.TestError('Create bridge need iface be set')
        net_xml = NetworkXML()
        net_xml.name = self.name
        net_xml.forward = {'mode': 'bridge'}
        net_xml.bridge = {'name': iface}
        return utils_net.get_ip_address_by_interface(iface), net_xml

    def __init__(self, net_type, **kwargs):
        self.kwargs = kwargs
        custom_name = kwargs.get('net_name')
        # Fall back to a generated name only when none was given at all.
        self.name = ('avocado-vt-%s' % net_type
                     if custom_name is None else custom_name)
        self.persistent = kwargs.get('persistent', False)
        builders = {'vnet': self.create_vnet_xml,
                    'macvtap': self.create_macvtap_xml,
                    'bridge': self.create_bridge_xml}
        if net_type not in builders:
            raise exceptions.TestError(
                'Unknown libvirt network type %s' % net_type)
        self.ip, net_xml = builders[net_type]()
        # Persistent networks are defined then started; transient ones
        # are created in one step.
        if self.persistent:
            net_xml.define()
            net_xml.start()
        else:
            net_xml.create()

    def cleanup(self):
        """
        Destroy the network, and undefine it when it was persistent.
        """
        virsh.net_destroy(self.name)
        if self.persistent:
            virsh.net_undefine(self.name)
def cpus_parser(cpulist):
    """
    Parse a CPU list string into a sorted list of CPU numbers.

    The syntax is a comma separated list, with '-' for ranges and '^'
    denoting exclusion of a single CPU.

    :param cpulist: a string of physical CPU numbers (e.g. "0-3,^2,5");
                    ``None`` is passed through unchanged
    :return: sorted list of selected CPUs; a single-element list for a
             plain integer string; ``None`` for ``None`` input
    """
    hyphens = []
    carets = []
    commas = []
    others = []
    if cpulist is None:
        return None
    else:
        if "," in cpulist:
            cpulist_list = cpulist.split(",")
            for cpulist in cpulist_list:
                if "-" in cpulist:
                    tmp = cpulist.split("-")
                    # list() is required under Python 3, where range()
                    # is lazy and cannot be concatenated to a list.
                    hyphens = hyphens + list(range(int(tmp[0]),
                                                   int(tmp[-1]) + 1))
                elif "^" in cpulist:
                    tmp = cpulist.split("^")[-1]
                    carets.append(int(tmp))
                else:
                    try:
                        commas.append(int(cpulist))
                    except ValueError:
                        logging.error("The cpulist has to be an "
                                      "integer. (%s)", cpulist)
        elif "-" in cpulist:
            tmp = cpulist.split("-")
            hyphens = list(range(int(tmp[0]), int(tmp[-1]) + 1))
        elif "^" in cpulist:
            # Previously re.split("^", ...) was used here; '^' is the
            # zero-width start-of-string anchor, so the string was never
            # split and int() crashed on e.g. "^5".  Split on the
            # literal caret instead.
            tmp = cpulist.split("^")[-1]
            carets.append(int(tmp))
        else:
            try:
                others.append(int(cpulist))
                return others
            except ValueError:
                logging.error("The cpulist has to be an "
                              "integer. (%s)", cpulist)
    cpus_set = set(hyphens).union(set(commas)).difference(set(carets))
    return sorted(list(cpus_set))
def cpus_string_to_affinity_list(cpus_string, num_cpus):
    """
    Convert a cpus_string into an affinity list of 'y'/'-' flags.

    e.g. with host_cpu_count = 4::

        0       --> [y,-,-,-]
        0,1     --> [y,y,-,-]
        0-2     --> [y,y,y,-]
        0-2,^2  --> [y,y,-,-]
        r       --> [y,y,y,y]

    :param cpus_string: CPU selection string ('r' selects all CPUs)
    :param num_cpus: total number of CPUs (length of the result list)
    :return: list of 'y'/'-' flags, one per CPU
    """
    # Validate the overall format up front; a mismatch is only logged.
    single_pattern = r"\d+"
    between_pattern = r"\d+-\d+"
    exclude_pattern = r"\^\d+"
    sub_pattern = r"(%s)|(%s)|(%s)" % (exclude_pattern,
                                       single_pattern, between_pattern)
    pattern = r"^((%s),)*(%s)$" % (sub_pattern, sub_pattern)
    if not re.match(pattern, cpus_string):
        logging.debug("Cpus_string=%s is not a supported format for cpu_list."
                      % cpus_string)
    # Start with every CPU deselected.
    affinity = ['-'] * int(num_cpus)
    # The special value 'r' selects every CPU.
    if cpus_string == "r":
        return ['y'] * len(affinity)
    # Apply each comma separated element in turn.
    for element in cpus_string.split(","):
        if "-" in element:
            low = element.split("-")[0]
            high = element.split("-")[-1]
            for idx in range(int(low), int(high) + 1):
                affinity[idx] = "y"
        elif "^" in element:
            affinity[int(element.strip("^"))] = "-"
        else:
            affinity[int(element)] = "y"
    return affinity
def cpu_allowed_list_by_task(pid, tid):
    """
    Return the Cpus_allowed_list field from a task's status file.

    :param pid: process id owning the task
    :param tid: thread/task id to query
    :return: the allowed-CPUs list string, or None when the command fails
    """
    status_cmd = ("cat /proc/%s/task/%s/status|grep Cpus_allowed_list:"
                  "| awk '{print $2}'" % (pid, tid))
    result = process.run(status_cmd, ignore_status=True, shell=True)
    if result.exit_status != 0:
        return None
    return result.stdout.strip()
def clean_up_snapshots(vm_name, snapshot_list=None, domxml=None):
    """
    Do recovery after snapshot

    :param vm_name: Name of domain
    :param snapshot_list: The list of snapshot name you want to remove;
                          when None/empty, all snapshots of the domain
                          are discovered and removed
    :param domxml: The object of domain xml for dumpxml command
    """
    # NOTE: the default used to be a shared mutable list ([]); use None
    # as the sentinel so calls cannot leak state into each other.
    if not snapshot_list:
        # Get all snapshot names from virsh snapshot-list
        snapshot_list = virsh.snapshot_list(vm_name)
        # Get snapshot disk path
        for snap_name in snapshot_list:
            # Delete useless disk snapshot file if exists
            snap_xml = virsh.snapshot_dumpxml(vm_name,
                                              snap_name).stdout.strip()
            xtf_xml = xml_utils.XMLTreeFile(snap_xml)
            disks_path = xtf_xml.findall('disks/disk/source')
            # 'disk_src' (not 'disk') so the imported 'disk' module from
            # libvirt_xml.devices is not shadowed.
            for disk_src in disks_path:
                os.system('rm -f %s' % disk_src.get('file'))
            # Delete snapshots of vm
            virsh.snapshot_delete(vm_name, snap_name)
        # External disk snapshot couldn't be deleted by virsh command,
        # It need to be deleted by qemu-img command
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            # Delete snapshot metadata first
            for snap_name in snapshot_list:
                virsh.snapshot_delete(vm_name, snap_name, "--metadata")
            # Delete all snapshot by qemu-img.
            # Domain xml should be proviced by parameter, we can't get
            # the image name from dumpxml command, it will return a
            # snapshot image name
            if domxml:
                disks_path = domxml.xmltreefile.findall('devices/disk/source')
                for disk_src in disks_path:
                    img_name = disk_src.get('file')
                    snaps = utils_misc.get_image_snapshot(img_name)
                    cmd = "qemu-img snapshot %s" % img_name
                    for snap in snaps:
                        process.run("%s -d %s" % (cmd, snap))
    else:
        # Get snapshot disk path from domain xml because
        # there is no snapshot info with the name
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile
        disk_path = dom_xml.find('devices/disk/source').get('file')
        for name in snapshot_list:
            snap_disk_path = disk_path.split(".")[0] + "." + name
            os.system('rm -f %s' % snap_disk_path)
def get_all_cells():
    """
    Collect the per-cell free memory reported by ``virsh freecell --all``.

    ::

        # virsh freecell --all
            0:     124200 KiB
            1:    1059868 KiB
        --------------------
        Total:    1184068 KiB

    That would return a dict like::

        {"0": "124200 KiB", "1": "1059868 KiB", "Total": "1184068 KiB"}

    :return: dict mapping cell id (or "Total") to its free-memory string
    :raise: TestSkipError when NUMA is unsupported, TestFail otherwise
    """
    fc_result = virsh.freecell(options="--all", ignore_status=True)
    if fc_result.exit_status:
        err_msg = fc_result.stderr.strip()
        if "NUMA not supported" in fc_result.stderr:
            raise exceptions.TestSkipError(err_msg)
        raise exceptions.TestFail(err_msg)
    lines = fc_result.stdout.strip().splitlines()
    # Drop the "------------" separator (second line from the end).
    del lines[-2]
    cell_dict = {}
    for line in lines:
        fields = line.split(":")
        cell_dict[fields[0].strip()] = fields[-1].strip()
    return cell_dict
def check_blockjob(vm_name, target, check_point="none", value="0"):
    """
    Run the blockjob command to check block job progress, bandwidth, etc.

    :param vm_name: Domain name
    :param target: Domain disk target dev
    :param check_point: Job progress, bandwidth or none (no job)
    :param value: Value of progress, bandwidth (with unit) or 0 (no job)
    :return: Boolean value, true for pass, false for fail
    """
    if check_point not in ("progress", "bandwidth", "none"):
        logging.error("Check point must be: progress, bandwidth or none")
        return False
    try:
        cmd_result = virsh.blockjob(
            vm_name, target, "--info", debug=True, ignore_status=True)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
    except Exception as e:
        logging.error("Error occurred: %s", e)
        return False
    if status:
        logging.error("Run blockjob command fail")
        return False
    # libvirt prints block job progress to stderr
    if check_point == 'none':
        if err:
            logging.error("Expect no job but find block job:\n%s", err)
            return False
        return True
    if check_point == "progress":
        return bool(re.search(value + " %", err))
    # Only "bandwidth" remains at this point.
    # Since 1.3.3-1, libvirt supports bytes and scaled integers for
    # bandwidth, and the output of blockjob may look like:
    #   # virsh blockjob avocado-vt-vm1 vda --info
    #   Block Copy: [100 %] Bandwidth limit: 9223372036853727232 bytes/s (8.000 EiB/s)
    # So the caller has to pass the bandwidth with its unit, and we
    # normalize both sides to the same unit before comparing.
    try:
        bandwidth, unit = re.findall(r'(\d+) (\w+)/s', output)[0]
        # unit could be 'bytes' or 'Mib'
        unit = 'B' if unit == 'bytes' else 'M'
        u_value = utils_misc.normalize_data_size(value, unit)
        if float(u_value) == float(bandwidth):
            logging.debug("Bandwidth is equal to %s", bandwidth)
            return True
        logging.error("Bandwidth is not equal to %s", bandwidth)
        return False
    except Exception as e:
        logging.error("Fail to get bandwidth: %s", e)
        return False
def setup_or_cleanup_nfs(is_setup, mount_dir="nfs-mount", is_mount=False,
                         export_options="rw,no_root_squash",
                         mount_options="rw",
                         export_dir="nfs-export",
                         restore_selinux="",
                         rm_export_dir=True):
    """
    Set SELinux to "permissive" and set up an NFS service on localhost,
    or clean the NFS service up and restore SELinux.

    Note: SELinux status must be backed up and restored after use.
    Example::

        # Setup NFS.
        res = setup_or_cleanup_nfs(is_setup=True)
        # Backup SELinux status.
        selinux_bak = res["selinux_status_bak"]
        # Do something.
        ...
        # Cleanup NFS and restore SELinux.
        res = setup_or_cleanup_nfs(is_setup=False, restore_selinux=selinux_bak)

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param mount_dir: NFS mount dir. This can be an absolute path on the
                      host or a relative path origin from libvirt tmp dir.
                      Default to "nfs-mount".
    :param is_mount: Boolean value, whether the target NFS should be mounted.
    :param export_options: Export options for the NFS dir.
                           Default to "rw,no_root_squash".
    :param mount_options: Options for mounting the NFS dir. Default to "rw".
    :param export_dir: NFS export dir. This can be an absolute path on the
                       host or a relative path origin from libvirt tmp dir.
                       Default to "nfs-export".
    :param restore_selinux: SELinux status string to restore on cleanup.
    :param rm_export_dir: Boolean, True for forcibly removing the export
                          dir, False for keeping it.
    :return: A dict with export and mount result parameters:
             export_dir: absolute directory of the exported NFS filesystem,
             mount_dir: absolute directory the NFS filesystem is mounted on,
             selinux_status_bak: SELinux status before set
    """
    result = {}
    on_ubuntu = distro.detect().name == 'Ubuntu'
    tmpdir = data_dir.get_tmp_dir()
    # Anchor relative paths inside the libvirt tmp dir.
    if not os.path.isabs(export_dir):
        export_dir = os.path.join(tmpdir, export_dir)
    if not os.path.isabs(mount_dir):
        mount_dir = os.path.join(tmpdir, mount_dir)
    result["export_dir"] = export_dir
    result["mount_dir"] = mount_dir
    result["selinux_status_bak"] = None
    if not on_ubuntu:
        result["selinux_status_bak"] = utils_selinux.get_status()
    nfs_params = {"nfs_mount_dir": mount_dir,
                  "nfs_mount_options": mount_options,
                  "nfs_mount_src": export_dir,
                  "setup_local_nfs": "yes",
                  "export_options": export_options}
    _nfs = nfs.Nfs(nfs_params)
    if is_setup:
        # Switch SELinux to permissive so files on the export can be
        # used freely.
        if not on_ubuntu and utils_selinux.is_enforcing():
            utils_selinux.set_status("permissive")
        _nfs.setup()
        if not is_mount:
            _nfs.umount()
            del result["mount_dir"]
    else:
        if not on_ubuntu and restore_selinux:
            utils_selinux.set_status(restore_selinux)
        _nfs.unexportfs_in_clean = True
        _nfs.rm_mount_dir = True
        _nfs.rm_export_dir = rm_export_dir
        _nfs.cleanup()
    return result
def setup_or_cleanup_iscsi(is_setup, is_login=True,
                           emulated_image="emulated-iscsi", image_size="1G",
                           chap_user="", chap_passwd="", restart_tgtd="no",
                           portal_ip="127.0.0.1"):
    """
    Set up (and optionally log into) an iscsi target, or clean up the
    iscsi service on localhost.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param is_login: Boolean value, true for login, false for not login
    :param emulated_image: name of iscsi device
    :param image_size: emulated image's size
    :param chap_user: CHAP authentication username
    :param chap_passwd: CHAP authentication password
    :param restart_tgtd: whether tgtd is restarted ("yes"/"no")
    :param portal_ip: IP the iscsi portal listens on
    :return: iscsi device name (login), (target, luns) tuple (no login),
             or "" on cleanup
    """
    tmpdir = data_dir.get_tmp_dir()
    emulated_path = os.path.join(tmpdir, emulated_image)
    emulated_target = ("iqn.%s.com.virttest:%s.target" %
                       (time.strftime("%Y-%m"), emulated_image))
    iscsi_params = {"emulated_image": emulated_path,
                    "target": emulated_target,
                    "image_size": image_size,
                    "iscsi_thread_id": "virt",
                    "chap_user": chap_user,
                    "chap_passwd": chap_passwd,
                    "restart_tgtd": restart_tgtd,
                    "portal_ip": portal_ip}
    _iscsi = iscsi.Iscsi.create_iSCSI(iscsi_params)
    if not is_setup:
        # Cleanup path: flag the target as exported so cleanup tears it
        # down, then remove the backing file.
        _iscsi.export_flag = True
        _iscsi.emulated_id = _iscsi.get_target_id()
        _iscsi.cleanup()
        process.run("rm -f %s" % emulated_path)
        return ""
    if not is_login:
        _iscsi.export_target()
        return (emulated_target, _iscsi.luns)
    _iscsi.login()
    # The device doesn't necessarily appear instantaneously, so give
    # about 5 seconds for it to appear before giving up
    iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1,
                                       "Searching iscsi device name.")
    if iscsi_device:
        logging.debug("iscsi device: %s", iscsi_device)
        return iscsi_device
    logging.error("Not find iscsi device.")
    # Cleanup and return "" - caller needs to handle that
    # _iscsi.export_target() will have set the emulated_id and
    # export_flag already on success...
    _iscsi.cleanup()
    process.run("rm -f %s" % emulated_path)
def get_host_ipv4_addr():
    """
    Get the first IPv4 address among the host's "UP" interfaces.

    :return: the IPv4 address string
    :raise: TestFail when no UP interface has an IPv4 address
    """
    # Initialize explicitly: previously a host with no IPv4-configured
    # UP interface raised UnboundLocalError instead of TestFail.
    ip_addr = None
    if_up = utils_net.get_net_if(state="UP")
    for i in if_up:
        ipv4_value = utils_net.get_net_if_addrs(i)["ipv4"]
        logging.debug("ipv4_value is %s", ipv4_value)
        if ipv4_value != []:
            ip_addr = ipv4_value[0]
            break
    if ip_addr is not None:
        logging.info("ipv4 address is %s", ip_addr)
    else:
        raise exceptions.TestFail("Fail to get ip address")
    return ip_addr
def setup_or_cleanup_gluster(is_setup, vol_name, brick_path="", pool_name="",
                             file_path="/etc/glusterfs/glusterd.vol"):
    """
    Set up or clean up glusterfs environment on localhost

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param vol_name: gluster created volume name
    :param brick_path: Dir for create glusterfs
    :param pool_name: used to build the default brick dir when brick_path
                      is not given
    :param file_path: path of the glusterd configuration file
    :return: ip_addr on setup, "" on cleanup
    :raise: TestSkipError when the 'gluster' command is unavailable
    """
    try:
        utils_path.find_command("gluster")
    except utils_path.CmdNotFoundError:
        raise exceptions.TestSkipError("Missing command 'gluster'")
    if not brick_path:
        tmpdir = data_dir.get_tmp_dir()
        brick_path = os.path.join(tmpdir, pool_name)
    if is_setup:
        ip_addr = get_host_ipv4_addr()
        gluster.add_rpc_insecure(file_path)
        gluster.glusterd_start()
        logging.debug("finish start gluster")
        gluster.gluster_vol_create(vol_name, ip_addr, brick_path, force=True)
        gluster.gluster_allow_insecure(vol_name)
        gluster.gluster_nfs_disable(vol_name)
        # Use a context manager so the config file handle is closed
        # promptly instead of being leaked.
        with open(file_path) as config_file:
            logging.debug("The contents of %s: \n%s", file_path,
                          config_file.read())
        logging.debug("finish vol create in gluster")
        return ip_addr
    else:
        gluster.gluster_vol_stop(vol_name, True)
        gluster.gluster_vol_delete(vol_name)
        gluster.gluster_brick_delete(brick_path)
        return ""
def define_pool(pool_name, pool_type, pool_target, cleanup_flag, **kwargs):
    """
    To define a given type pool(Support types: 'dir', 'netfs', logical',
    iscsi', 'gluster', 'disk' and 'fs').

    :param pool_name: Name of the pool
    :param pool_type: Type of the pool
    :param pool_target: Target for underlying storage
    :param cleanup_flag: A list contains 3 booleans and 1 string stands for
                         need_cleanup_nfs, need_cleanup_iscsi,
                         need_cleanup_logical, selinux_bak and
                         need_cleanup_gluster; updated in place
    :param kwargs: key words for special pool define. eg, glusterfs pool
                   source path and source name, etc
    :return: CmdResult of 'virsh pool-define-as', or None when running it
             raised a CmdError
    """
    extra = ""
    vg_name = pool_name
    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False
    selinux_bak = ""
    cleanup_gluster = False
    if not os.path.exists(pool_target) and pool_type != "gluster":
        os.mkdir(pool_target)
    if pool_type == "dir":
        pass
    elif pool_type == "netfs":
        # Set up NFS server without mount
        res = setup_or_cleanup_nfs(True, pool_target, False)
        nfs_path = res["export_dir"]
        selinux_bak = res["selinux_status_bak"]
        cleanup_nfs = True
        extra = "--source-host %s --source-path %s" % ('127.0.0.1',
                                                       nfs_path)
    elif pool_type == "logical":
        # Create vg by using iscsi device
        lv_utils.vg_create(vg_name, setup_or_cleanup_iscsi(True))
        cleanup_iscsi = True
        cleanup_logical = True
        extra = "--source-name %s" % vg_name
    elif pool_type == "iscsi":
        # Set up iscsi target without login
        iscsi_target, _ = setup_or_cleanup_iscsi(True, False)
        cleanup_iscsi = True
        extra = "--source-host %s --source-dev %s" % ('127.0.0.1',
                                                      iscsi_target)
    elif pool_type == "disk":
        # Set up iscsi target and login
        device_name = setup_or_cleanup_iscsi(True)
        cleanup_iscsi = True
        # Create a partition to make sure disk pool can start
        mk_label(device_name)
        mk_part(device_name)
        extra = "--source-dev %s" % device_name
    elif pool_type == "fs":
        # Set up iscsi target and login
        device_name = setup_or_cleanup_iscsi(True)
        cleanup_iscsi = True
        # Format disk to make sure fs pool can start
        source_format = kwargs.get('source_format', 'ext4')
        mkfs(device_name, source_format)
        extra = "--source-dev %s --source-format %s" % (device_name,
                                                        source_format)
    elif pool_type == "gluster":
        gluster_source_path = kwargs.get('gluster_source_path')
        gluster_source_name = kwargs.get('gluster_source_name')
        gluster_file_name = kwargs.get('gluster_file_name')
        gluster_file_type = kwargs.get('gluster_file_type')
        gluster_file_size = kwargs.get('gluster_file_size')
        gluster_vol_number = kwargs.get('gluster_vol_number')
        # Prepare gluster service and create volume
        hostip = setup_or_cleanup_gluster(True, gluster_source_name,
                                          pool_name=pool_name)
        logging.debug("hostip is %s", hostip)
        # create image in gluster volume
        file_path = "gluster://%s/%s" % (hostip, gluster_source_name)
        for i in range(gluster_vol_number):
            file_name = "%s_%d" % (gluster_file_name, i)
            process.run("qemu-img create -f %s %s/%s %s" %
                        (gluster_file_type, file_path, file_name,
                         gluster_file_size))
        cleanup_gluster = True
        extra = "--source-host %s --source-path %s --source-name %s" % \
            (hostip, gluster_source_path, gluster_source_name)
    elif pool_type in ["scsi", "mpath", "rbd", "sheepdog"]:
        raise exceptions.TestSkipError(
            "Pool type '%s' has not yet been supported in the test." %
            pool_type)
    else:
        raise exceptions.TestFail("Invalid pool type: '%s'." % pool_type)
    # Mark the clean up flags
    cleanup_flag[0] = cleanup_nfs
    cleanup_flag[1] = cleanup_iscsi
    cleanup_flag[2] = cleanup_logical
    cleanup_flag[3] = selinux_bak
    cleanup_flag[4] = cleanup_gluster
    # Initialize so a CmdError no longer causes an UnboundLocalError on
    # the return below; None signals the define attempt failed to run.
    result = None
    try:
        result = virsh.pool_define_as(pool_name, pool_type, pool_target, extra,
                                      ignore_status=True)
    except process.CmdError:
        logging.error("Define '%s' type pool fail.", pool_type)
    return result
def verify_virsh_console(session, user, passwd, timeout=10, debug=False):
    """
    Run commands in console session.

    Drives a virsh console session through the login sequence (escape
    banner -> login prompt -> password prompt -> shell prompt), then
    runs ``cat /proc/cpuinfo`` and checks the word "processor" appears.

    :param session: aexpect session attached to the guest console
    :param user: user name to send at the "login:" prompt
    :param passwd: password to send at the "Password:" prompt
    :param timeout: seconds to wait for each expected prompt
    :param debug: when True, log every prompt/response exchange
    :return: True when login and the command check succeed, else False
    """
    log = ""
    console_cmd = "cat /proc/cpuinfo"
    try:
        while True:
            # The index of the matched pattern decides what to send next.
            match, text = session.read_until_last_line_matches(
                [r"[E|e]scape character is", r"login:",
                 r"[P|p]assword:", session.prompt],
                timeout, internal_timeout=1)
            if match == 0:
                # Escape banner: wake the console with a newline.
                if debug:
                    logging.debug("Got '^]', sending '\\n'")
                session.sendline()
            elif match == 1:
                if debug:
                    logging.debug("Got 'login:', sending '%s'", user)
                session.sendline(user)
            elif match == 2:
                if debug:
                    logging.debug("Got 'Password:', sending '%s'", passwd)
                session.sendline(passwd)
            elif match == 3:
                # Shell prompt reached; leave the login loop.
                if debug:
                    logging.debug("Got Shell prompt -- logged in")
                break
        status, output = session.cmd_status_output(console_cmd)
        logging.info("output of command:\n%s", output)
        session.close()
    except (aexpect.ShellError,
            aexpect.ExpectError) as detail:
        log = session.get_output()
        logging.error("Verify virsh console failed:\n%s\n%s", detail, log)
        session.close()
        return False
    if not re.search("processor", output):
        logging.error("Verify virsh console failed: Result does not match.")
        return False
    return True
def pci_label_from_address(address_dict, radix=10):
    """
    Generate a pci label from a dict of address.

    :param address_dict: A dict contains domain, bus, slot and function.
    :param radix: The radix of your data in address_dict.
    :raise: TestError when a component is missing or has the wrong type.

    Example::

        address_dict = {'domain': '0x0000', 'bus': '0x08', 'slot': '0x10', 'function': '0x0'}
        radix = 16
        return = pci_0000_08_10_0
    """
    try:
        fields = tuple(int(address_dict[key], radix)
                       for key in ('domain', 'bus', 'slot', 'function'))
    except (TypeError, KeyError) as detail:
        raise exceptions.TestError(detail)
    return "pci_%04x_%02x_%02x_%01x" % fields
def mk_label(disk, label="msdos", session=None):
    """
    Write a new partition-table label on a disk via parted.

    :param disk: device path to label
    :param label: partition table type, "msdos" by default
    :param session: optional guest/remote session; runs locally when None
    """
    mklabel_cmd = "parted -s %s mklabel %s" % (disk, label)
    runner = session.cmd if session else process.run
    runner(mklabel_cmd)
def mk_part(disk, size="100M", fs_type='ext4', session=None):
    """
    Create a partition for disk

    :param disk: device path to partition
    :param size: size of the new partition (parted-style size string)
    :param fs_type: filesystem type passed to parted mkpart
    :param session: optional guest/remote session; runs locally when None
    """
    # TODO: This is just a temporary function to create partition for
    # testing usage, should be replaced by a more robust one.
    support_lable = ['unknown', 'gpt', 'msdos']
    disk_label = 'msdos'
    part_type = 'primary'
    part_start = '0'
    # Choose command runner: local host or the provided session.
    run_cmd = process.system_output
    if session:
        run_cmd = session.get_command_output
    # Read the current partition table to learn label, size and
    # existing partitions.
    print_cmd = "parted -s %s print" % disk
    output = run_cmd(print_cmd)
    current_label = re.search(r'Partition Table: (\w+)', output).group(1)
    if current_label not in support_lable:
        logging.error('Not support create partition on %s disk', current_label)
        return
    disk_size = re.search(r"Disk %s: (\w+)" % disk, output).group(1)
    # One groupdict per existing partition row in the parted output.
    pat = r'(?P<num>\d+)\s+(?P<start>\S+)\s+(?P<end>\S+)\s+(?P<size>\S+)\s+'
    current_parts = [m.groupdict() for m in re.finditer(pat, output)]
    mkpart_cmd = "parted -s -a optimal %s" % disk
    # An unlabeled disk gets an msdos label first.
    if current_label == 'unknown':
        mkpart_cmd += " mklabel %s" % disk_label
    # Start the new partition right after the last existing one.
    if len(current_parts) > 0:
        part_start = current_parts[-1]['end']
    part_end = (float(utils_misc.normalize_data_size(part_start,
                                                     factor='1000')) +
                float(utils_misc.normalize_data_size(size, factor='1000')))
    # Deal with msdos disk
    if current_label == 'msdos':
        # msdos allows at most 4 primaries: with 3 present, create an
        # extended partition spanning the rest of the disk, then add
        # further partitions as logical.
        if len(current_parts) == 3:
            extended_cmd = " mkpart extended %s %s" % (part_start, disk_size)
            run_cmd(mkpart_cmd + extended_cmd)
        if len(current_parts) > 2:
            part_type = 'logical'
    mkpart_cmd += ' mkpart %s %s %s %s' % (part_type, fs_type, part_start,
                                           part_end)
    run_cmd(mkpart_cmd)
def mkfs(partition, fs_type, options="", session=None):
    """
    Force-create a filesystem of the given type on a partition.

    :param partition: device path to format
    :param fs_type: filesystem type (ext*, fat/vfat/msdos, xfs, btrfs, ...)
    :param options: extra options appended to the mkfs command
    :param session: optional guest/remote session; runs locally when None
    """
    # Each filesystem family uses a different "force" switch.
    force_flags = {'ext2': '-F', 'ext3': '-F', 'ext4': '-F', 'ntfs': '-F',
                   'fat': '-I', 'vfat': '-I', 'msdos': '-I',
                   'xfs': '-f', 'btrfs': '-f'}
    force_option = force_flags.get(fs_type, '')
    mkfs_cmd = "mkfs.%s %s %s %s" % (fs_type, force_option, partition, options)
    if session:
        session.cmd(mkfs_cmd)
    else:
        process.run(mkfs_cmd)
def get_parts_list(session=None):
    """
    List partition device names from /proc/partitions.

    :param session: optional guest/remote session; reads the local host
                    when None
    :return: list of partition device names
    """
    parts_cmd = "cat /proc/partitions"
    if session:
        _, parts_out = session.cmd_status_output(parts_cmd)
    else:
        parts_out = process.run(parts_cmd).stdout
    parts = []
    if parts_out:
        for line in parts_out.rsplit("\n"):
            # Skip the header row and blank lines.
            if not line or line.startswith("major"):
                continue
            fields = line.rsplit()
            # Data rows have 4 columns; the last is the device name.
            if len(fields) == 4:
                parts.append(fields[3])
    logging.debug("Find parts: %s" % parts)
    return parts
def yum_install(pkg_list, session=None):
    """
    Install packages via yum, skipping ones already installed.

    :param pkg_list: list of package names to install
    :param session: optional guest/remote session; runs locally when None
    :raise: TestError for a non-list argument, TestFail when any
            package fails to install
    """
    if not isinstance(pkg_list, list):
        raise exceptions.TestError("Parameter error.")
    yum_cmd = "rpm -q {0} || yum -y install {0}"
    for pkg in pkg_list:
        install_cmd = yum_cmd.format(pkg)
        if session:
            status = session.cmd_status(install_cmd)
        else:
            status = process.run(install_cmd, shell=True).exit_status
        if status:
            raise exceptions.TestFail("Failed to install package: %s"
                                      % pkg)
def check_actived_pool(pool_name):
    """
    Verify the named storage pool exists and is active.

    :param pool_name: name of the pool to check
    :return: True when the pool exists and is active
    :raise: TestFail when the pool is missing or inactive
    """
    storage_pool = libvirt_storage.StoragePool()
    checks = ((storage_pool.pool_exists, "Can't find pool %s" % pool_name),
              (storage_pool.is_pool_active, "Pool %s is not active." % pool_name))
    for check, fail_msg in checks:
        if not check(pool_name):
            raise exceptions.TestFail(fail_msg)
    logging.debug("Find active pool %s", pool_name)
    return True
class PoolVolumeTest(object):
"""Test class for storage pool or volume"""
    def __init__(self, test, params):
        # Base directory (the test's tmpdir) used to anchor relative
        # pool/NFS paths during setup and cleanup.
        self.tmpdir = test.tmpdir
        # Test parameters, consulted for nfs_server_dir, scsi_xml_file, etc.
        self.params = params
        # SELinux status backup, restored when cleaning up netfs pools.
        self.selinux_bak = ""
    def cleanup_pool(self, pool_name, pool_type, pool_target, emulated_image,
                     **kwargs):
        """
        Delete vols, destroy the created pool and restore the env

        :param pool_name: name of the pool to remove
        :param pool_type: pool type (dir, netfs, logical, disk, iscsi,
                          scsi, fs, gluster)
        :param pool_target: pool target path (relative to tmpdir for
                            dir/fs/netfs pools)
        :param emulated_image: name of the emulated iscsi backing image
        :param kwargs: source_format, source_name, device_name, etc.
        """
        sp = libvirt_storage.StoragePool()
        source_format = kwargs.get('source_format')
        source_name = kwargs.get('source_name')
        device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
        try:
            if sp.pool_exists(pool_name):
                pv = libvirt_storage.PoolVolume(pool_name)
                if pool_type in ["dir", "netfs", "logical", "disk"]:
                    if sp.is_pool_active(pool_name):
                        vols = pv.list_volumes()
                        for vol in vols:
                            # Ignore failed deletion here for deleting pool
                            pv.delete_volume(vol)
                if not sp.delete_pool(pool_name):
                    raise exceptions.TestFail(
                        "Delete pool %s failed" % pool_name)
        finally:
            # Environment teardown runs even when pool deletion failed.
            if pool_type == "netfs" and source_format != 'glusterfs':
                nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server")
                nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
                setup_or_cleanup_nfs(is_setup=False, export_dir=nfs_path,
                                     restore_selinux=self.selinux_bak)
                if os.path.exists(nfs_path):
                    shutil.rmtree(nfs_path)
            if pool_type == "logical":
                cmd = "pvs |grep vg_logical|awk '{print $1}'"
                pv = process.system_output(cmd, shell=True)
                # Cleanup logical volume anyway
                process.run("vgremove -f vg_logical", ignore_status=True)
                process.run("pvremove %s" % pv, ignore_status=True)
            # These types used iscsi device
            # If we did not provide block device
            if (pool_type in ["logical", "fs", "disk"] and
                    device_name.count("EXAMPLE")):
                setup_or_cleanup_iscsi(is_setup=False,
                                       emulated_image=emulated_image)
            # Used iscsi device anyway
            if pool_type in ["iscsi", "scsi"]:
                setup_or_cleanup_iscsi(is_setup=False,
                                       emulated_image=emulated_image)
                if pool_type == "scsi":
                    scsi_xml_file = self.params.get("scsi_xml_file", "")
                    if os.path.exists(scsi_xml_file):
                        os.remove(scsi_xml_file)
            if pool_type in ["dir", "fs", "netfs"]:
                pool_target = os.path.join(self.tmpdir, pool_target)
                if os.path.exists(pool_target):
                    shutil.rmtree(pool_target)
            if pool_type == "gluster" or source_format == 'glusterfs':
                setup_or_cleanup_gluster(False, source_name,
                                         pool_name=pool_name)
def pre_pool(self, pool_name, pool_type, pool_target, emulated_image,
**kwargs):
"""
Prepare(define or create) the specific type pool
:param pool_name: created pool name
:param pool_type: dir, disk, logical, fs, netfs or else
:param pool_target: target of storage pool
:param emulated_image: use an image file to simulate a scsi disk
it could be used for disk, logical pool, etc
:param kwargs: key words for specific pool
"""
extra = ""
image_size = kwargs.get('image_size', "100M")
source_format = kwargs.get('source_format')
source_name = kwargs.get('source_name', None)
persistent = kwargs.get('persistent', False)
device_name = kwargs.get('device_name', "/DEV/EXAMPLE")
adapter_type = kwargs.get('pool_adapter_type', 'scsi_host')
pool_wwnn = kwargs.get('pool_wwnn', None)
pool_wwpn = kwargs.get('pool_wwpn', None)
# If tester does not provide block device, creating one
if (device_name.count("EXAMPLE") and
pool_type in ["disk", "fs", "logical"]):
device_name = setup_or_cleanup_iscsi(is_setup=True,
emulated_image=emulated_image,
image_size=image_size)
if pool_type == "dir":