-
Notifications
You must be signed in to change notification settings - Fork 270
/
qam_cluster_node.yaml
79 lines (73 loc) · 2.26 KB
/
qam_cluster_node.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
name: qam_cluster_node
description: >
  Create a 2-node cluster.
  Schedule for a custom cluster on maintenance.
  Some modules are executed or skipped depending on the
  targeted package.
  Some settings are required in the job group or test suite for this schedule to work.
  The other settings required in the job group are:
  CLUSTER_NAME must be defined for all jobs as a string.
  HA_CLUSTER_INIT must be defined to yes in the job that initializes the cluster, and to no
  in the remaining cluster node jobs.
  HA_CLUSTER_JOIN must be defined for the rest of the jobs, and it must contain the
  hostname of the job where HA_CLUSTER_INIT is defined to yes.
  HOSTNAME must be defined to different hostnames for each node.
  All jobs with the exception of the parent job must include a PARALLEL_WITH setting
  referencing the parent job.
  NICTYPE and WORKER_CLASS must be set to 'tap' in the job group directly in qemu based jobs.
  And of course, YAML_SCHEDULE must point to this file.
vars:
  # Boot qcow2 image generated by create_hdd_ha_textmode
  BOOT_HDD_IMAGE: '1'
  CTDB_TEST_ROLE: server
  DESKTOP: textmode
  HA_CLUSTER: '1'
  MAINTENANCE: '1'
  HDDMODEL: scsi-hd
  NICTYPE: tap
  # Disable qemu snapshots to avoid unexpected fences in HA tests
  QEMU_DISABLE_SNAPSHOTS: '1'
  TIMEOUT_SCALE: '3'
  # NOTE(review): the comment below does not describe USE_SUPPORT_SERVER;
  # it looks like a leftover from an LVM/lvmlockd-related setting that was
  # removed — confirm before deleting it.
  # LVM locking daemon used since SLE 15
  USE_SUPPORT_SERVER: '1'
  VIRTIO_CONSOLE: '0'
  # Below have to be entered in the OpenQA UI because it doesn't read this YAML
  # HDD_1: SLE-%VERSION%-%ARCH%-Build%BUILD%-HA-BV.qcow2
schedule:
  - boot/boot_to_desktop
  - ha/wait_barriers
  - console/system_prepare
  - console/consoletest_setup
  - console/check_os_release
  - console/hostname
  - ha/ha_sle15_workarounds
  - ha/firewall_disable
  - ha/iscsi_client
  - ha/watchdog
  # Expanded from conditional_schedule below depending on HA_CLUSTER_INIT
  - '{{cluster_setup}}'
  - ha/check_hawk
  - ha/dlm
  - ha/clvmd_lvmlockd
  - ha/cluster_md
  - ha/vg
  - ha/filesystem
  - ha/drbd_passive
  # NOTE(review): second ha/filesystem run, presumably to exercise the
  # filesystem on top of the DRBD device set up just above — confirm intent
  - ha/filesystem
  - ha/ctdb
  - ha/haproxy
  - ha/fencing
  # Expanded from conditional_schedule below depending on HA_CLUSTER_INIT
  - '{{boot_to_desktop_node01}}'
  - ha/check_after_reboot
  - ha/remove_node
  - ha/check_logs
conditional_schedule:
  # Keys under HA_CLUSTER_INIT are quoted so YAML 1.1 parsers cannot
  # re-type 'yes'/'no' as booleans; under YAML 1.2 quoting is a no-op.
  cluster_setup:
    HA_CLUSTER_INIT:
      'yes':
        - ha/ha_cluster_init
      'no':
        - ha/ha_cluster_join
  boot_to_desktop_node01:
    HA_CLUSTER_INIT:
      'yes':
        - boot/boot_to_desktop