From 64385a74b7e85e52d2884ea90e223d6c177c98ce Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Thu, 19 Nov 2020 11:06:37 +0100 Subject: [PATCH 01/14] Add module to configure lvm on top of RAID1 --- schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml | 89 +++++++++++++++++-- test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml | 85 ++++++++++++++++++ .../partitioning/setup_raid1_lvm.pm | 61 +++++++++++++ 3 files changed, 230 insertions(+), 5 deletions(-) create mode 100644 test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml create mode 100644 tests/installation/partitioning/setup_raid1_lvm.pm diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml index 7280c244ff72..64b48fc7c152 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml @@ -6,19 +6,17 @@ description: > vars: RAIDLEVEL: 1 LVM: 1 -test_data: - <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml + YUI_REST_API: 1 schedule: - - installation/isosize - installation/bootloader_start + - installation/setup_libyui - installation/welcome - installation/accept_license - installation/scc_registration - installation/addon_products_sle - installation/system_role - installation/partitioning - - installation/partitioning_raid - - installation/partitioning_finish + - installation/partitioning/setup_raid1_lvm - installation/installer_timezone - installation/user_settings - installation/user_settings_root @@ -29,6 +27,87 @@ schedule: - installation/await_install - installation/logs_from_installation_system - installation/reboot_after_installation + - installation/teardown_libyui - installation/grub_test - installation/first_boot - console/validate_lvm_raid1 +test_data: + disks: + - name: vda + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdb + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdc + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdd + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + mds: + - raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 + - raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 + lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system + lvpath: /dev/root/root + pvname: /dev/md0p1 + raid1: + disk_to_fail: /dev/vdd2 + level: raid1 + name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml b/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml new file mode 100644 index 000000000000..6c626561538f --- /dev/null +++ b/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml @@ -0,0 +1,85 @@ +--- +disks: + - name: vda + partitions: + - size: 300mb + role: raw-volume + formatting_options: + should_format: 1 + filesystem: fat + mounting_options: + should_mount: 1 + mount_point: '/boot/efi' + - size: 12500mb + 
role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdb + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdc + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdd + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid +mds: + - raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 + - raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 +lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system + lvpath: /dev/root/root + pvname: /dev/md0p1 +raid1: + disk_to_fail: /dev/vdd2 + level: raid1 + name: /dev/md0 diff --git a/tests/installation/partitioning/setup_raid1_lvm.pm b/tests/installation/partitioning/setup_raid1_lvm.pm new file mode 100644 index 000000000000..c0e2e284d38a --- /dev/null +++ b/tests/installation/partitioning/setup_raid1_lvm.pm @@ -0,0 +1,61 @@ +# SUSE's openQA tests +# +# Copyright © 2020 SUSE LLC +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. This file is offered as-is, +# without any warranty. + +# Summary: The test module uses Expert Partitioning wizard on disks with GPT +# partition table to create RAID using data driven pattern. Data is provided +# by yaml scheduling file. + +# Maintainer: QE YaST + +use parent 'y2_installbase'; + +use strict; +use warnings; + +use testapi; +use version_utils ':VERSION'; +use scheduler 'get_test_suite_data'; + +sub run { + my $test_data = get_test_suite_data(); + + my $partitioner = $testapi::distri->get_expert_partitioner(); + $partitioner->run_expert_partitioner(); + + # Create partitions with the data from yaml scheduling file on first disk + # (see YAML_SCHEDULE openQA variable value). + my $first_disk = $test_data->{disks}[0]; + foreach my $partition (@{$first_disk->{partitions}}) { + $partitioner->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); + } + + # Clone partition table from first disk to all other disks + my $numdisks = scalar(@{$test_data->{disks}}) - 1; + $partitioner->clone_partition_table({disk => $first_disk->{name}, numdisks => $numdisks}); + + # Create RAID partitions with the data from yaml scheduling file + # (see YAML_SCHEDULE openQA variable value). 
+ foreach my $md (@{$test_data->{mds}}) { + $partitioner->add_raid($md); + } + # Add volume groups and logical volumes as per test data + foreach my $vg (@{$test_data->{lvm}->{volume_groups}}) { + $partitioner->add_volume_group($vg); + foreach my $lv (@{$vg->{logical_volumes}}) { + $partitioner->add_logical_volume({ + volume_group => $vg->{name}, + logical_volume => $lv + }); + } + } + + $partitioner->accept_changes_and_press_next(); +} + +1; From 0d40dda2df171918342a146efdd068e15d7e64a1 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Mon, 23 Nov 2020 11:51:26 +0100 Subject: [PATCH 02/14] Enable libyui REST API for lvm+RAID1 test on hyper-v --- .../lvm+raid1_sle15_svirt-hyperv.yaml | 7 +- .../lvm+raid1_sle15_svirt-hyperv.yaml | 55 +++++++++++- .../lvm+raid1_sle15_svirt-hyperv_uefi.yaml | 85 +++++++++++++++++++ 3 files changed, 140 insertions(+), 7 deletions(-) create mode 100644 test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml index dbcb689d7873..2c88f8d45014 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml @@ -6,19 +6,19 @@ description: > vars: RAIDLEVEL: 1 LVM: 1 + YUI_REST_API: 1 test_data: <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml schedule: - - installation/isosize - installation/bootloader_start + - installation/setup_libyui - installation/welcome - installation/accept_license - installation/scc_registration - installation/addon_products_sle - installation/system_role - installation/partitioning - - installation/partitioning_raid - - installation/partitioning_finish + - installation/partitioning/setup_raid1_lvm - installation/installer_timezone - installation/user_settings - installation/user_settings_root @@ -28,6 +28,7 @@ schedule: - installation/start_install - installation/await_install - installation/reboot_after_installation + - installation/teardown_libyui - installation/grub_test - installation/first_boot - console/validate_lvm_raid1 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml index f49da292a975..1acdcc5c2f6d 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml @@ -1,10 +1,57 @@ --- disks: - - sda - - sdb - - sdc - - sdd + - name: sda + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdb + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdc + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdd + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system lvpath: /dev/root/root pvname: /dev/md0p1 raid1: diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml 
b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml new file mode 100644 index 000000000000..511c98908e6b --- /dev/null +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml @@ -0,0 +1,85 @@ +--- +disks: + - name: sda + partitions: + - size: 300mb + role: raw-volume + formatting_options: + should_format: 1 + filesystem: fat + mounting_options: + should_mount: 1 + mount_point: '/boot/efi' + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdb + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdc + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: sdd + partitions: + - size: 300mb + role: raw-volume + id: efi + - size: 12500mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid +mds: + - raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 + - raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 +lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system + lvpath: /dev/root/root + pvname: /dev/md0p1 +raid1: + disk_to_fail: /dev/sdd2 + level: raid1 + name: /dev/md0 From 33243aa84bc2a6edb8d8601cc6c8c974d32fafb1 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Mon, 23 Nov 2020 14:14:41 +0100 Subject: [PATCH 03/14] Setup libyui REST API on hyper-v backend --- lib/YuiRestClient.pm | 21 +++++++++++++++------ tests/installation/setup_libyui.pm | 20 ++++++++++++++++++-- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/lib/YuiRestClient.pm b/lib/YuiRestClient.pm index 7cec9f56ebdf..d4ff8abe5ad8 100644 --- a/lib/YuiRestClient.pm +++ b/lib/YuiRestClient.pm @@ -17,7 +17,7 @@ use constant API_VERSION => 'v1'; use testapi; use utils 'type_string_slow'; -use Utils::Backends 'is_pvm'; +use Utils::Backends qw(is_pvm is_hyperv); use YuiRestClient::App; @@ -41,20 +41,29 @@ sub get_app { return $app; } -sub setup_libyui { +sub connect_to_app { my $port = get_var('YUI_PORT'); my $host = get_var('YUI_SERVER'); + die "Cannot set libyui REST API server" unless $host; record_info('PORT', "Used port for libyui: $port"); record_info('SERVER', "Connecting to: $host"); - assert_screen('startshell', timeout => 500); - type_string_slow "extend libyui-rest-api\n"; - type_string_slow "exit\n"; my $app = YuiRestClient::App->new({port => $port, host => $host, api_version => API_VERSION}); # As we start installer, REST API is not instantly available $app->connect(timeout => 500, interval => 10); set_app($app); } +sub process_start_shell { + assert_screen('startshell', timeout => 500); + type_string_slow "extend libyui-rest-api\n"; + type_string_slow "exit\n"; +} + +sub setup_libyui { + process_start_shell; + connect_to_app; +} + sub teardown_libyui { assert_screen('startshell', timeout => 100); type_string_slow "exit\n"; @@ -81,7 +90,7 @@ sub set_libyui_backend_vars { } elsif (is_pvm) { $server = get_var('SUT_IP'); } - die "Cannot set libyui REST API server" unless $server; + set_var('YUI_SERVER', $server); } diff 
--git a/tests/installation/setup_libyui.pm b/tests/installation/setup_libyui.pm index ec95a542747c..f1ff0f2e619b 100644 --- a/tests/installation/setup_libyui.pm +++ b/tests/installation/setup_libyui.pm @@ -21,14 +21,30 @@ use strict; use warnings; use base "installbasetest"; -use Utils::Backends 'is_pvm'; +use Utils::Backends qw(is_pvm is_hyperv); +use testapi; use YuiRestClient; +use YuiRestClient::Wait; sub run { # We setup libyui in bootloader on powerVM return if is_pvm; - YuiRestClient::setup_libyui(); + YuiRestClient::process_start_shell(); + + if (is_hyperv) { + my $svirt = select_console('svirt'); + my $name = $svirt->name; + my $cmd = "powershell -Command \"Get-VM -Name $name | Select -ExpandProperty Networkadapters | Select IPAddresses\""; + my $ip = YuiRestClient::Wait::wait_until(object => sub { + my $ip = $svirt->get_cmd_output($cmd); + return $+{ip} if ($ip =~ /(?<ip>(\d+\.){3}\d+)/i); + }, timeout => 500, interval => 30); + set_var('YUI_SERVER', $ip); + select_console('sut', await_console => 0); + } + + YuiRestClient::connect_to_app(); } 1; From 285a652520d82812ab16bbe968dc50a3ef67ec56 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Mon, 23 Nov 2020 16:37:21 +0100 Subject: [PATCH 04/14] Trim extraboot parameters string --- lib/bootloader_setup.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/bootloader_setup.pm b/lib/bootloader_setup.pm index 0c400a036a33..7e14c9ff3626 100644 --- a/lib/bootloader_setup.pm +++ b/lib/bootloader_setup.pm @@ -314,7 +314,7 @@ sub select_bootmenu_option { } sub get_extra_boot_params { - my @params = split ' ', get_var('EXTRABOOTPARAMS', ''); + my @params = split ' ', trim(get_var('EXTRABOOTPARAMS', '')); return @params; } From afc1ba689dae5d099f1d46e88d3ff3fd231c84d2 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 11:35:22 +0100 Subject: [PATCH 05/14] Setup libyui on svirt backend On svirt we cannot determine the IP address from the hypervisor, so we have to wait till the installer boots and get the IP address from there. We use bridge networking there, so the IP address is accessible from the worker.
--- .../lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml | 4 +++- .../lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml | 4 +++- tests/installation/setup_libyui.pm | 16 ++++++++++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml index 3c273287c9de..3abab1be3d9d 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml @@ -6,11 +6,12 @@ description: > vars: RAIDLEVEL: 1 LVM: 1 + YUI_REST_API: 1 test_data: <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml schedule: - - installation/isosize - installation/bootloader_start + - installation/setup_libyui - installation/welcome - installation/accept_license - installation/scc_registration @@ -29,6 +30,7 @@ schedule: - installation/await_install - installation/logs_from_installation_system - installation/reboot_after_installation + - installation/teardown_libyui - installation/grub_test - installation/first_boot - console/validate_lvm_raid1 diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml index 4f8b06ef5318..57c901720dde 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml @@ -6,11 +6,12 @@ description: > vars: RAIDLEVEL: 1 LVM: 1 + YUI_REST_API: 1 test_data: <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml schedule: - - installation/isosize - installation/bootloader_start + - installation/setup_libyui - installation/welcome - installation/accept_license - installation/scc_registration @@ -28,5 +29,6 @@ schedule: - installation/await_install - installation/logs_from_installation_system - installation/reboot_after_installation + - installation/teardown_libyui - installation/first_boot - console/validate_lvm_raid1 diff --git a/tests/installation/setup_libyui.pm b/tests/installation/setup_libyui.pm index f1ff0f2e619b..9618eb0a0718 100644 --- a/tests/installation/setup_libyui.pm +++ b/tests/installation/setup_libyui.pm @@ -27,6 +27,9 @@ use testapi; use YuiRestClient; use YuiRestClient::Wait; +my $ip_regexp = qr/(?<ip>(\d+\.){3}\d+)/i; +my $boot_timeout = 500; + sub run { # We setup libyui in bootloader on powerVM return if is_pvm; @@ -38,10 +41,19 @@ sub run { my $cmd = "powershell -Command \"Get-VM -Name $name | Select -ExpandProperty Networkadapters | Select IPAddresses\""; my $ip = YuiRestClient::Wait::wait_until(object => sub { my $ip = $svirt->get_cmd_output($cmd); - return $+{ip} if ($ip =~ /(?<ip>(\d+\.){3}\d+)/i); - }, timeout => 500, interval => 30); + return $+{ip} if ($ip =~ $ip_regexp); + }, timeout => $boot_timeout, interval => 30); set_var('YUI_SERVER', $ip); select_console('sut', await_console => 0); + } elsif (check_var('BACKEND', 'svirt')) { + assert_screen('yast-still-running', $boot_timeout); + select_console('install-shell'); + my $ip = YuiRestClient::Wait::wait_until(object => sub { + my $ip = script_output('ip -o -4 addr list | sed -n 2p | awk \'{print $4}\' | cut -d/ -f1', proceed_on_failure => 1); + return $+{ip} if ($ip =~ $ip_regexp); + }); + set_var('YUI_SERVER', $ip); + select_console('installation'); } YuiRestClient::connect_to_app(); From 4c07a35b968fd4f013a08aacb6363257d724dd91 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 14:08:29 +0100 Subject: [PATCH 06/14] Use libyui REST API in lvm+RAID1 on svirt
backend --- .../lvm+raid1_sle15_svirt-xen-hvm.yaml | 3 +- .../lvm+raid1_sle15_svirt-xen-pv.yaml | 3 +- .../lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml | 75 ++++++++++++++++++- 3 files changed, 73 insertions(+), 8 deletions(-) diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml index 3abab1be3d9d..817d3a6cea26 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml @@ -18,8 +18,7 @@ schedule: - installation/addon_products_sle - installation/system_role - installation/partitioning - - installation/partitioning_raid - - installation/partitioning_finish + - installation/partitioning/setup_raid1_lvm - installation/installer_timezone - installation/user_settings - installation/user_settings_root diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml index 57c901720dde..3c1def0f45e4 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml @@ -18,8 +18,7 @@ schedule: - installation/addon_products_sle - installation/system_role - installation/partitioning - - installation/partitioning_raid - - installation/partitioning_finish + - installation/partitioning/setup_raid1_lvm - installation/installer_timezone - installation/user_settings - installation/user_settings_root diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml index 11c23eb4d583..55535336fa77 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml @@ -1,10 +1,77 @@ --- disks: - - xvdb - - xvdbc - - xvdd - - xvde + - name: xvdb + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: xvdc + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: xvdd + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: xvde + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid +mds: + - raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 + - raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system lvpath: /dev/root/root pvname: /dev/md0p1 raid1: From 333e09d14ffc1ea6833c4e4969d898bf4db535d9 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 14:32:25 +0100 Subject: [PATCH 07/14] Enable libyui REST API in lvm+RAID on opensuse Unite the test data for SLES and openSUSE, as it is the same, and reuse it in both schedules.
--- .../yast/lvm_raid1/lvm+raid1_opensuse.yaml | 9 +- schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml | 82 +------------------ test_data/yast/lvm_raid1/lvm+raid1.yaml | 80 ++++++++++++++++++ .../yast/lvm_raid1/lvm+raid1_opensuse.yaml | 13 --- test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml | 13 --- 5 files changed, 87 insertions(+), 110 deletions(-) create mode 100644 test_data/yast/lvm_raid1/lvm+raid1.yaml delete mode 100644 test_data/yast/lvm_raid1/lvm+raid1_opensuse.yaml delete mode 100644 test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml diff --git a/schedule/yast/lvm_raid1/lvm+raid1_opensuse.yaml b/schedule/yast/lvm_raid1/lvm+raid1_opensuse.yaml index e73c2a15c34b..0cb4670160df 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_opensuse.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_opensuse.yaml @@ -6,19 +6,19 @@ description: > vars: RAIDLEVEL: 1 LVM: 1 + YUI_REST_API: 1 test_data: - <<: !include test_data/yast/lvm_raid1/lvm+raid1_opensuse.yaml + <<: !include test_data/yast/lvm_raid1/lvm+raid1.yaml schedule: - - installation/isosize - installation/bootloader_start + - installation/setup_libyui - installation/welcome - installation/online_repos - installation/installation_mode - installation/logpackages - installation/system_role - installation/partitioning - - installation/partitioning_raid - - installation/partitioning_finish + - installation/partitioning/setup_raid1_lvm - installation/installer_timezone - installation/user_settings - installation/resolve_dependency_issues @@ -28,6 +28,7 @@ schedule: - installation/await_install - installation/logs_from_installation_system - installation/reboot_after_installation + - installation/teardown_libyui - installation/grub_test - installation/first_boot - console/validate_lvm_raid1 diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml index 64b48fc7c152..6c31d114e6e0 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15.yaml @@ -7,6 +7,8 @@ vars: RAIDLEVEL: 1 LVM: 1 YUI_REST_API: 1 +test_data: + <<: !include test_data/yast/lvm_raid1/lvm+raid1.yaml schedule: - installation/bootloader_start - installation/setup_libyui @@ -31,83 +33,3 @@ schedule: - installation/grub_test - installation/first_boot - console/validate_lvm_raid1 -test_data: - disks: - - name: vda - partitions: - - size: 2mb - id: bios-boot - role: raw-volume - - size: 8000mb - role: raw-volume - id: linux-raid - - size: 100mb - role: raw-volume - id: linux-raid - - name: vdb - partitions: - - size: 2mb - id: bios-boot - role: raw-volume - - size: 8000mb - role: raw-volume - id: linux-raid - - size: 100mb - role: raw-volume - id: linux-raid - - name: vdc - partitions: - - size: 2mb - id: bios-boot - role: raw-volume - - size: 8000mb - role: raw-volume - id: linux-raid - - size: 100mb - role: raw-volume - id: linux-raid - - name: vdd - partitions: - - size: 2mb - id: bios-boot - role: raw-volume - - size: 8000mb - role: raw-volume - id: linux-raid - - size: 100mb - role: raw-volume - id: linux-raid - mds: - - raid_level: 1 - chunk_size: 64 - device_selection_step: 2 - partition: - role: raw-volume - formatting_options: - should_format: 0 - mounting_options: - should_mount: 0 - - raid_level: 0 - chunk_size: 64 - device_selection_step: 1 - partition: - role: operating-system - formatting_options: - should_format: 1 - filesystem: swap - mounting_options: - should_mount: 1 - lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: 
operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 - raid1: - disk_to_fail: /dev/vdd2 - level: raid1 - name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1.yaml b/test_data/yast/lvm_raid1/lvm+raid1.yaml new file mode 100644 index 000000000000..72718363e1f9 --- /dev/null +++ b/test_data/yast/lvm_raid1/lvm+raid1.yaml @@ -0,0 +1,80 @@ +--- +disks: + - name: vda + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdb + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdc + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid + - name: vdd + partitions: + - size: 2mb + id: bios-boot + role: raw-volume + - size: 8000mb + role: raw-volume + id: linux-raid + - size: 100mb + role: raw-volume + id: linux-raid +mds: + - raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 + - raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 +lvm: + volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system + lvpath: /dev/root/root + pvname: /dev/md0p1 +raid1: + disk_to_fail: /dev/vdd2 + level: raid1 + name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_opensuse.yaml b/test_data/yast/lvm_raid1/lvm+raid1_opensuse.yaml deleted file mode 100644 index 878870c4f0f2..000000000000 --- a/test_data/yast/lvm_raid1/lvm+raid1_opensuse.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -disks: - - vda - - vdb - - vdc - - vdd -lvm: - lvpath: /dev/root/root - pvname: /dev/md0p1 -raid1: - disk_to_fail: /dev/vdd2 - level: raid1 - name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml deleted file mode 100644 index 878870c4f0f2..000000000000 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -disks: - - vda - - vdb - - vdc - - vdd -lvm: - lvpath: /dev/root/root - pvname: /dev/md0p1 -raid1: - disk_to_fail: /dev/vdd2 - level: raid1 - name: /dev/md0 From 8c532dd4a9e6e3a68735d0054125d0367ffce193 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 14:49:08 +0100 Subject: [PATCH 08/14] Reuse common mds and lvm part in lvm+RAID1 test data --- test_data/yast/lvm_raid1/lvm+raid1.yaml | 34 +++---------------- test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml | 10 ++++++ test_data/yast/lvm_raid1/lvm+raid1_mds.yaml | 20 +++++++++++ .../lvm+raid1_sle15_svirt-hyperv.yaml | 14 +++----- .../lvm+raid1_sle15_svirt-hyperv_uefi.yaml | 34 +++---------------- .../lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml | 34 +++---------------- test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml | 34 +++---------------- 7 files changed, 50 insertions(+), 130 deletions(-) create mode 100644 test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml create mode 100644 test_data/yast/lvm_raid1/lvm+raid1_mds.yaml diff --git a/test_data/yast/lvm_raid1/lvm+raid1.yaml b/test_data/yast/lvm_raid1/lvm+raid1.yaml index 72718363e1f9..12733e74729a 100644 --- 
a/test_data/yast/lvm_raid1/lvm+raid1.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1.yaml @@ -1,4 +1,8 @@ --- +mds: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_mds.yaml +lvm: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml disks: - name: vda partitions: @@ -44,36 +48,6 @@ disks: - size: 100mb role: raw-volume id: linux-raid -mds: - - raid_level: 1 - chunk_size: 64 - device_selection_step: 2 - partition: - role: raw-volume - formatting_options: - should_format: 0 - mounting_options: - should_mount: 0 - - raid_level: 0 - chunk_size: 64 - device_selection_step: 1 - partition: - role: operating-system - formatting_options: - should_format: 1 - filesystem: swap - mounting_options: - should_mount: 1 -lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 raid1: disk_to_fail: /dev/vdd2 level: raid1 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml b/test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml new file mode 100644 index 000000000000..a5a818802c42 --- /dev/null +++ b/test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml @@ -0,0 +1,10 @@ +--- +volume_groups: + - name: vg-system + devices: + - /dev/md0p1 + logical_volumes: + - name: lv-root + role: operating-system +lvpath: /dev/vg-system/lv-root +pvname: /dev/md0p1 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_mds.yaml b/test_data/yast/lvm_raid1/lvm+raid1_mds.yaml new file mode 100644 index 000000000000..f09baecc2995 --- /dev/null +++ b/test_data/yast/lvm_raid1/lvm+raid1_mds.yaml @@ -0,0 +1,20 @@ +--- +- raid_level: 1 + chunk_size: 64 + device_selection_step: 2 + partition: + role: raw-volume + formatting_options: + should_format: 0 + mounting_options: + should_mount: 0 +- raid_level: 0 + chunk_size: 64 + device_selection_step: 1 + partition: + role: operating-system + formatting_options: + should_format: 1 + filesystem: swap + mounting_options: + should_mount: 1 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml index 1acdcc5c2f6d..3bfd2b8fbecf 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml @@ -1,4 +1,8 @@ --- +mds: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_mds.yaml +lvm: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml disks: - name: sda partitions: @@ -44,16 +48,6 @@ disks: - size: 100mb role: raw-volume id: linux-raid -lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 raid1: disk_to_fail: /dev/sdd2 level: raid1 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml index 511c98908e6b..adc69771f43a 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml @@ -1,4 +1,8 @@ --- +mds: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_mds.yaml +lvm: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml disks: - name: sda partitions: @@ -49,36 +53,6 @@ disks: - size: 100mb role: raw-volume id: linux-raid -mds: - - raid_level: 1 - chunk_size: 64 - device_selection_step: 2 - partition: - role: raw-volume - formatting_options: - should_format: 0 - mounting_options: - should_mount: 0 - - raid_level: 0 - chunk_size: 64 - 
device_selection_step: 1 - partition: - role: operating-system - formatting_options: - should_format: 1 - filesystem: swap - mounting_options: - should_mount: 1 -lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 raid1: disk_to_fail: /dev/sdd2 level: raid1 name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml index 55535336fa77..ef77a4518e31 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml @@ -1,4 +1,8 @@ --- +mds: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_mds.yaml +lvm: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml disks: - name: xvdb partitions: - size: 2mb @@ -44,36 +48,6 @@ disks: - size: 100mb role: raw-volume id: linux-raid -mds: - - raid_level: 1 - chunk_size: 64 - device_selection_step: 2 - partition: - role: raw-volume - formatting_options: - should_format: 0 - mounting_options: - should_mount: 0 - - raid_level: 0 - chunk_size: 64 - device_selection_step: 1 - partition: - role: operating-system - formatting_options: - should_format: 1 - filesystem: swap - mounting_options: - should_mount: 1 -lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 raid1: disk_to_fail: /dev/xvdd2 level: raid1 name: /dev/md0 diff --git a/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml b/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml index 6c626561538f..1ea3b146f2e1 100644 --- a/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml +++ b/test_data/yast/lvm_raid1/lvm+raid1_uefi.yaml @@ -1,4 +1,8 @@ --- +mds: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_mds.yaml +lvm: + <<: !include test_data/yast/lvm_raid1/lvm+raid1_lvm.yaml disks: - name: vda partitions: - size: 300mb @@ -49,36 +53,6 @@ disks: - size: 100mb role: raw-volume id: linux-raid -mds: - - raid_level: 1 - chunk_size: 64 - device_selection_step: 2 - partition: - role: raw-volume - formatting_options: - should_format: 0 - mounting_options: - should_mount: 0 - - raid_level: 0 - chunk_size: 64 - device_selection_step: 1 - partition: - role: operating-system - formatting_options: - should_format: 1 - filesystem: swap - mounting_options: - should_mount: 1 -lvm: - volume_groups: - - name: vg-system - devices: - - /dev/md0p1 - logical_volumes: - - name: lv-root - role: operating-system - lvpath: /dev/root/root - pvname: /dev/md0p1 raid1: disk_to_fail: /dev/vdd2 level: raid1 name: /dev/md0 From 68fbf1a59ba9f881c39e415c6ff2896dd59c1171 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 14:58:06 +0100 Subject: [PATCH 09/14] Rename lvm+RAID1 test data not to have sle15 in the name We have different test data for SLE 12, as partitioning was different there, but for the other scenarios we have the same setup, which differs only per backend, so the name should reflect that this is universal data.
--- schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml | 2 +- schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml | 2 +- schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml | 2 +- ...{lvm+raid1_sle15_svirt-hyperv.yaml => lvm+raid1_hyperv.yaml} | 0 ..._sle15_svirt-hyperv_uefi.yaml => lvm+raid1_hyperv_uefi.yaml} | 0 ...{lvm+raid1_sle15_svirt-xen.yaml => lvm+raid1_svirt-xen.yaml} | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename test_data/yast/lvm_raid1/{lvm+raid1_sle15_svirt-hyperv.yaml => lvm+raid1_hyperv.yaml} (100%) rename test_data/yast/lvm_raid1/{lvm+raid1_sle15_svirt-hyperv_uefi.yaml => lvm+raid1_hyperv_uefi.yaml} (100%) rename test_data/yast/lvm_raid1/{lvm+raid1_sle15_svirt-xen.yaml => lvm+raid1_svirt-xen.yaml} (100%) diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml index 2c88f8d45014..538a52b44f39 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml @@ -8,7 +8,7 @@ vars: LVM: 1 YUI_REST_API: 1 test_data: - <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml + <<: !include test_data/yast/lvm_raid1/lvm+raid1_hyperv.yaml schedule: - installation/bootloader_start - installation/setup_libyui diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml index 817d3a6cea26..b2679eba19ce 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-hvm.yaml @@ -8,7 +8,7 @@ vars: LVM: 1 YUI_REST_API: 1 test_data: - <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml + <<: !include test_data/yast/lvm_raid1/lvm+raid1_svirt-xen.yaml schedule: - installation/bootloader_start - installation/setup_libyui diff --git a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml index 3c1def0f45e4..c117f7321939 100644 --- a/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml +++ b/schedule/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen-pv.yaml @@ -8,7 +8,7 @@ vars: LVM: 1 YUI_REST_API: 1 test_data: - <<: !include test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml + <<: !include test_data/yast/lvm_raid1/lvm+raid1_svirt-xen.yaml schedule: - installation/bootloader_start - installation/setup_libyui diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml b/test_data/yast/lvm_raid1/lvm+raid1_hyperv.yaml similarity index 100% rename from test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv.yaml rename to test_data/yast/lvm_raid1/lvm+raid1_hyperv.yaml diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml b/test_data/yast/lvm_raid1/lvm+raid1_hyperv_uefi.yaml similarity index 100% rename from test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-hyperv_uefi.yaml rename to test_data/yast/lvm_raid1/lvm+raid1_hyperv_uefi.yaml diff --git a/test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml b/test_data/yast/lvm_raid1/lvm+raid1_svirt-xen.yaml similarity index 100% rename from test_data/yast/lvm_raid1/lvm+raid1_sle15_svirt-xen.yaml rename to test_data/yast/lvm_raid1/lvm+raid1_svirt-xen.yaml From d4908fd6872574e53acbde0c4b5f7577d8effe12 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Wed, 25 Nov 2020 16:51:41 +0100 Subject: [PATCH 10/14] Process startshell on svirt backend --- tests/installation/reboot_after_installation.pm | 10 +++++++--- 
tests/installation/teardown_libyui.pm | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/installation/reboot_after_installation.pm b/tests/installation/reboot_after_installation.pm index 5a4e4a6b3ac2..8db7f76f74cd 100644 --- a/tests/installation/reboot_after_installation.pm +++ b/tests/installation/reboot_after_installation.pm @@ -65,9 +65,13 @@ sub run { send_key 'alt-o'; } # Process exiting from the startshell - if (YuiRestClient::is_libyui_rest_api && is_pvm) { - select_console 'powerhmc-ssh', await_console => 0; - YuiRestClient::teardown_libyui(); + if (YuiRestClient::is_libyui_rest_api) { + if (is_pvm) { + select_console 'powerhmc-ssh', await_console => 0; + YuiRestClient::teardown_libyui(); + } elsif (check_var('BACKEND', 'svirt')) { + YuiRestClient::teardown_libyui(); + } } if (get_var('USE_SUPPORT_SERVER') && get_var('USE_SUPPORT_SERVER_PXE_CUSTOMKERNEL')) { # "Press ESC for boot menu" diff --git a/tests/installation/teardown_libyui.pm b/tests/installation/teardown_libyui.pm index 6ac0961854f8..3be89a3fd3c6 100644 --- a/tests/installation/teardown_libyui.pm +++ b/tests/installation/teardown_libyui.pm @@ -27,7 +27,7 @@ use Utils::Backends 'is_pvm'; use YuiRestClient; sub run { - return if is_pvm; + return if is_pvm || check_var('BACKEND', 'svirt'); YuiRestClient::teardown_libyui(); } From de4fb7d8cfb6f399c0685803a2652e47646dc2cc Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Thu, 26 Nov 2020 09:42:50 +0100 Subject: [PATCH 11/14] Define target disks when clone instead of using all On xen-pv we have 5 disks available and should not touch the /dev/xvda disk, so we cannot use the approach of cloning one disk's setup to all other disks. We already have data defining where to clone the partitioning, so we implement a method to define the target disks. --- .../v4_3/ClonePartitionsDialog.pm | 21 +++++++++++++++++-- .../v4_3/ExpertPartitionerController.pm | 9 +++++--- .../v4_3/ExpertPartitionerPage.pm | 2 +- .../partitioning/setup_raid1_lvm.pm | 7 ++++--- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ClonePartitionsDialog.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ClonePartitionsDialog.pm index 68cf14d3fd80..9955a2f1552e 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ClonePartitionsDialog.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ClonePartitionsDialog.pm @@ -37,13 +37,30 @@ sub init { return $self; } +sub select_disks { + my ($self, @disks) = @_; + + my @available = $self->{lst_target_disks}->items(); + + foreach my $disk (@disks) { + # Find the list item which matches the wanted disk + if (my ($lst_item) = grep $_ =~ $disk, @available) { + $self->{lst_target_disks}->select($lst_item); + } + else { + die "$disk cannot be found in the list of target disks"; + } + } + return $self; +} + sub select_all_disks { my ($self) = @_; my @disks = $self->{lst_target_disks}->items(); #Select all disks - foreach (@disks) { - $self->{lst_target_disks}->select($_); + foreach my $disk (@disks) { + $self->{lst_target_disks}->select($disk); } return $self; } diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm index 7eb883a2c3fa..c1ec004f59b4 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm @@ -85,9 +85,12 @@ sub add_partition_on_gpt_disk { sub clone_partition_table { my ($self, $args) = @_; -
$self->get_expert_partitioner_page()->select_disk($args->{disk}); - $self->get_expert_partitioner_page()->open_clone_partition_dialog(); - $self->get_clone_partition_dialog()->select_all_disks(); + $self->get_expert_partitioner_page()->open_clone_partition_dialog($args->{disk}); + if ($args->{target_disks}) { + $self->get_clone_partition_dialog()->select_disks(@{$args->{target_disks}}); + } else { + $self->get_clone_partition_dialog()->select_all_disks(); + } $self->get_clone_partition_dialog()->press_ok(); } diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm index 3bcf5cf2d486..bbc7550e7fc4 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm @@ -62,7 +62,7 @@ sub open_clone_partition_dialog { my ($self, $disk) = @_; $self->{tree_system_view}->exist(); - $self->select_item_in_system_view_table('Hard Disks'); + $self->select_disk($disk) if $disk; # Cloning option is disabled if any partition is selected, so selecting disk $self->{tbl_devices}->select(row => 0); $self->{menu_bar}->select('&Device|&Clone Partitions to Another Device...'); diff --git a/tests/installation/partitioning/setup_raid1_lvm.pm b/tests/installation/partitioning/setup_raid1_lvm.pm index c0e2e284d38a..540182d01883 100644 --- a/tests/installation/partitioning/setup_raid1_lvm.pm +++ b/tests/installation/partitioning/setup_raid1_lvm.pm @@ -30,14 +30,15 @@ sub run { # Create partitions with the data from yaml scheduling file on first disk # (see YAML_SCHEDULE openQA variable value). - my $first_disk = $test_data->{disks}[0]; + my @disks = @{$test_data->{disks}}; + my $first_disk = $disks[0]; foreach my $partition (@{$first_disk->{partitions}}) { $partitioner->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); } # Clone partition table from first disk to all other disks - my $numdisks = scalar(@{$test_data->{disks}}) - 1; - $partitioner->clone_partition_table({disk => $first_disk->{name}, numdisks => $numdisks}); + my @target_disks = map { $_->{name} } @disks[1 .. $#disks]; + $partitioner->clone_partition_table({disk => $first_disk->{name}, target_disks => \@target_disks}); # Create RAID partitions with the data from yaml scheduling file # (see YAML_SCHEDULE openQA variable value). From 28557f897fe9a2305d509383988ef9482e7e5066 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Thu, 26 Nov 2020 11:13:21 +0100 Subject: [PATCH 12/14] Work around selection UI trigger in the table We have a bug in the REST API where menu items do not get enabled when an item in the table is selected. So we apply a workaround for now and will revert this commit once the fix is available.
--- .../Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm index bbc7550e7fc4..e61a44f6b7f1 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerPage.pm @@ -65,6 +65,9 @@ sub open_clone_partition_dialog { $self->select_disk($disk) if $disk; # Cloning option is disabled if any partition is selected, so selecting disk $self->{tbl_devices}->select(row => 0); + # This is a workaround, because row selection doesn't enable the clone item in the menu bar + send_key("end"); + send_key("home"); $self->{menu_bar}->select('&Device|&Clone Partitions to Another Device...'); return $self; } From 420dde1a3c749ebe1800c4b3404452c216e224d0 Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Thu, 26 Nov 2020 14:02:20 +0100 Subject: [PATCH 13/14] Extract common RAID configuration part We have the same steps in raid_gpt and setup_raid1_lvm, so we move the common part to the Expert Partitioner controller. --- .../v4/ExpertPartitionerController.pm | 18 ++++++++++++++++++ .../v4_3/ExpertPartitionerController.pm | 17 +++++++++++++++ tests/installation/partitioning/raid_gpt.pm | 19 +++---------------- .../partitioning/setup_raid1_lvm.pm | 18 ++---------------- 4 files changed, 40 insertions(+), 32 deletions(-) diff --git a/lib/Installation/Partitioner/LibstorageNG/v4/ExpertPartitionerController.pm b/lib/Installation/Partitioner/LibstorageNG/v4/ExpertPartitionerController.pm index fee2090ed6ca..7230caef6427 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4/ExpertPartitionerController.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4/ExpertPartitionerController.pm @@ -228,4 +228,22 @@ sub set_new_partition_size { $self->get_edit_partition_size_page()->press_next(); } +sub setup_raid { + my ($self, $args) = @_; + # Create partitions with the data from yaml scheduling file on first disk + my $first_disk = $args->{disks}[0]; + foreach my $partition (@{$first_disk->{partitions}}) { + $self->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); + } + + # Clone partition table from first disk to all other disks + my $numdisks = scalar(@{$args->{disks}}) - 1; + $self->clone_partition_table({disk => $first_disk->{name}, numdisks => $numdisks}); + + # Create RAID partitions with the data from yaml scheduling file + foreach my $md (@{$args->{mds}}) { + $self->add_raid($md); + } +} + 1; diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm index c1ec004f59b4..59e5b6fcb0c3 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm @@ -147,4 +147,21 @@ sub add_logical_volume { $self->_finish_partition_creation; } +sub setup_raid { + my ($self, $args) = @_; + # Create partitions with the data from yaml scheduling file on first disk + my @disks = @{$args->{disks}}; + my $first_disk = $disks[0]; + foreach my $partition (@{$first_disk->{partitions}}) { + $self->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); + } + # Clone partition table from first disk to all other disks + my @target_disks = map { $_->{name} } @disks[1 ..
$#disks]; + $self->clone_partition_table({disk => $first_disk->{name}, target_disks => \@target_disks}); + # Create RAID partitions with the data from yaml scheduling file + foreach my $md (@{$args->{mds}}) { + $self->add_raid($md); + } +} + 1; diff --git a/tests/installation/partitioning/raid_gpt.pm b/tests/installation/partitioning/raid_gpt.pm index 35ca1884d5c1..190058c95bd9 100644 --- a/tests/installation/partitioning/raid_gpt.pm +++ b/tests/installation/partitioning/raid_gpt.pm @@ -28,22 +28,9 @@ sub run { my $partitioner = $testapi::distri->get_expert_partitioner(); $partitioner->run_expert_partitioner(); - # Create partitions with the data from yaml scheduling file on first disk - # (see YAML_SCHEDULE openQA variable value). - my $first_disk = $test_data->{disks}[0]; - foreach my $partition (@{$first_disk->{partitions}}) { - $partitioner->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); - } - - # Clone partition table from first disk to all other disks - my $numdisks = scalar(@{$test_data->{disks}}) - 1; - $partitioner->clone_partition_table({disk => $first_disk->{name}, numdisks => $numdisks}); - - # Create RAID partitions with the data from yaml scheduling file - # (see YAML_SCHEDULE openQA variable value). - foreach my $md (@{$test_data->{mds}}) { - $partitioner->add_raid($md); - } + # Setup RAID as per test data (see YAML_SCHEDULE and YAML_TEST_DATA openQA variables) + $partitioner->setup_raid($test_data); + $partitioner->accept_changes_and_press_next(); } diff --git a/tests/installation/partitioning/setup_raid1_lvm.pm b/tests/installation/partitioning/setup_raid1_lvm.pm index 540182d01883..c06b1044a686 100644 --- a/tests/installation/partitioning/setup_raid1_lvm.pm +++ b/tests/installation/partitioning/setup_raid1_lvm.pm @@ -28,23 +28,9 @@ sub run { my $partitioner = $testapi::distri->get_expert_partitioner(); $partitioner->run_expert_partitioner(); - # Create partitions with the data from yaml scheduling file on first disk - # (see YAML_SCHEDULE openQA variable value). - my @disks = @{$test_data->{disks}}; - my $first_disk = $disks[0]; - foreach my $partition (@{$first_disk->{partitions}}) { - $partitioner->add_partition_on_gpt_disk({disk => $first_disk->{name}, partition => $partition}); - } - - # Clone partition table from first disk to all other disks - my @target_disks = map { $_->{name} } @disks[1 .. $#disks]; - $partitioner->clone_partition_table({disk => $first_disk->{name}, target_disks => \@target_disks}); + # Setup RAID as per test data (see YAML_SCHEDULE and YAML_TEST_DATA openQA variables) + $partitioner->setup_raid($test_data); - # Create RAID partitions with the data from yaml scheduling file - # (see YAML_SCHEDULE openQA variable value). - foreach my $md (@{$test_data->{mds}}) { - $partitioner->add_raid($md); - } # Add volume groups and logical volumes as per test data foreach my $vg (@{$test_data->{lvm}->{volume_groups}}) { $partitioner->add_volume_group($vg); foreach my $lv (@{$vg->{logical_volumes}}) { $partitioner->add_logical_volume({ volume_group => $vg->{name}, logical_volume => $lv }); } } From 7bd81253996798f4b57d18d0edac4d2393358b6a Mon Sep 17 00:00:00 2001 From: Rodion Iafarov Date: Thu, 26 Nov 2020 14:18:45 +0100 Subject: [PATCH 14/14] Extract common code for lvm configuration using REST API In encrypted_full_lvm and lvm+raid1 we have the same code for configuring LVM, which we can move to the controller library.
--- .../v4_3/ExpertPartitionerController.pm | 14 ++++++++++++++ .../partitioning/encrypted_full_lvm.pm | 10 +--------- tests/installation/partitioning/setup_raid1_lvm.pm | 13 ++----------- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm index 59e5b6fcb0c3..5784751fbfb4 100644 --- a/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm +++ b/lib/Installation/Partitioner/LibstorageNG/v4_3/ExpertPartitionerController.pm @@ -164,4 +164,18 @@ sub setup_raid { } } +sub setup_lvm { + my ($self, $args) = @_; + + foreach my $vg (@{$args->{volume_groups}}) { + $self->add_volume_group($vg); + foreach my $lv (@{$vg->{logical_volumes}}) { + $self->add_logical_volume({ + volume_group => $vg->{name}, + logical_volume => $lv + }); + } + } +} + 1; diff --git a/tests/installation/partitioning/encrypted_full_lvm.pm b/tests/installation/partitioning/encrypted_full_lvm.pm index 9f84b485f1c9..6bec3b645be5 100644 --- a/tests/installation/partitioning/encrypted_full_lvm.pm +++ b/tests/installation/partitioning/encrypted_full_lvm.pm @@ -38,15 +38,7 @@ sub run { }); } - my $volume_group = $test_data->{lvm}->{volume_groups}[0]; - $partitioner->add_volume_group($volume_group); - - foreach my $logical_volume (@{$volume_group->{logical_volumes}}) { - $partitioner->add_logical_volume({ - volume_group => $volume_group->{name}, - logical_volume => $logical_volume - }); - } + $partitioner->setup_lvm($test_data->{lvm}); $partitioner->accept_changes_and_press_next(); } diff --git a/tests/installation/partitioning/setup_raid1_lvm.pm b/tests/installation/partitioning/setup_raid1_lvm.pm index c06b1044a686..3c7a6b052f53 100644 --- a/tests/installation/partitioning/setup_raid1_lvm.pm +++ b/tests/installation/partitioning/setup_raid1_lvm.pm @@ -30,17 +30,8 @@ sub run { # Setup RAID as per test data (see YAML_SCHEDULE and YAML_TEST_DATA openQA variables) $partitioner->setup_raid($test_data); - - # Add volume groups and logical volumes as per test data - foreach my $vg (@{$test_data->{lvm}->{volume_groups}}) { - $partitioner->add_volume_group($vg); - foreach my $lv (@{$vg->{logical_volumes}}) { - $partitioner->add_logical_volume({ - volume_group => $vg->{name}, - logical_volume => $lv - }); - } - } + # Setup lvm as per test data (see YAML_SCHEDULE and YAML_TEST_DATA openQA variables) + $partitioner->setup_lvm($test_data->{lvm}); $partitioner->accept_changes_and_press_next(); }
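Taken together, patches 11 through 14 let a partitioning test module drive the whole storage setup from test data. For the xen-pv case that motivated patch 11, the clone dialog can now be told exactly which disks receive the copied partition table, leaving the Xen system disk untouched. A minimal sketch, assuming the disk names from lvm+raid1_svirt-xen.yaml (the snippet is illustrative, not part of the series):

    my $partitioner = $testapi::distri->get_expert_partitioner();
    $partitioner->run_expert_partitioner();
    # Clone the partition table of xvdb only to xvdc, xvdd and xvde,
    # leaving the Xen system disk xvda untouched.
    $partitioner->clone_partition_table({
        disk         => 'xvdb',
        target_disks => [qw(xvdc xvdd xvde)],
    });

After patches 13 and 14, a test module shrinks to two data-driven calls; a sketch under the same assumptions:

    # Partition the first disk, clone its table to the remaining disks and
    # assemble the RAID devices described under 'disks' and 'mds' in the test data.
    $partitioner->setup_raid($test_data);
    # Create the volume groups and logical volumes described under 'lvm'.
    $partitioner->setup_lvm($test_data->{lvm});
    $partitioner->accept_changes_and_press_next();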