This repository has been archived by the owner on Jul 25, 2022. It is now read-only.

Get EFS Testing working with Vagrant provisioning (#697)
Now that SSI tests are working with vagrant and libvirt, use the same process to get the EFS tests working.

There is some strangeness in how the EFS tests use the same network for both management and LNet; fix this as well.


Signed-off-by: Joe Grund <jgrund@whamcloud.io>
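
For context only, a minimal sketch of what separating LNet onto its own interface looks like on a storage node. The interface name (eth2) and the modprobe file name are illustrative assumptions, not taken from this change, and the commit itself achieves the separation through the Vagrant provisioning rather than by hand:

    # hypothetical example: pin LNet to a dedicated NIC so it no longer shares
    # the management network (eth2 is an assumed interface name)
    cat > /etc/modprobe.d/lnet.conf <<'EOF'
    options lnet networks="tcp(eth2)"
    EOF
    modprobe -r lnet 2>/dev/null || true   # reload LNet if it was already loaded
    modprobe lnet
    lctl network up     # bring LNet up on the configured interface
    lctl list_nids      # the NID should now report the dedicated interface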
jgrund committed Aug 2, 2018
1 parent 075a807 commit db4cb43
Showing 11 changed files with 316 additions and 78 deletions.
5 changes: 2 additions & 3 deletions Makefile
@@ -167,7 +167,7 @@ destroy_cluster: Vagrantfile
if rpm -q vagrant-libvirt || \
rpm -q sclo-vagrant1-vagrant-libvirt; then \
export LIBVIRT_DEFAULT_URI=qemu:///system; \
-for net in intel-manager-for-lustre{0,1,2,3} vagrant-libvirt; do \
+for net in integrated-manager-for-lustre{0,1,2,3} vagrant-libvirt; do \
virsh net-destroy $$net || true; \
virsh net-undefine $$net || true; \
done; \
@@ -273,8 +273,7 @@ ssi_tests: tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.
upgrade_tests:
tests/framework/integration/installation_and_upgrade/jenkins_steps/main $@

-efs_tests:
-pdsh -R ssh -l root -S -w vm[5-9] "echo \"options lnet networks=\\\"tcp(eth1)\\\"\" > /etc/modprobe.d/iml_lnet_module_parameters.conf; systemctl disable firewalld; systemctl stop firewalld"
+efs_tests: tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.in
tests/framework/integration/existing_filesystem_configuration/jenkins_steps/main $@

chroma_test_env: chroma_test_env/bin/activate
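As a usage sketch, the EFS suite is driven through make in the same way as the SSI suite. The target names come from the Makefile above; the ordering, and the assumption that the Vagrant/libvirt cluster is already up, are illustrative:

    # hypothetical run, assuming `vagrant up` has already provisioned the cluster
    make substs            # fill in the *.in templates for this cluster
    make efs_tests         # run the existing-filesystem-configuration tests
    make destroy_cluster   # tear down the libvirt domains and networks afterwards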
248 changes: 248 additions & 0 deletions provisioner_output-vagrant-efs.json.in
@@ -0,0 +1,248 @@
{
"reset": true,
"lustre_devices": [
{
"backend_filesystem": "ldiskfs",
"path_index": 0
},
{
"backend_filesystem": "ldiskfs",
"path_index": 1
},
{
"backend_filesystem": "ldiskfs",
"path_index": 2
},
{
"backend_filesystem": "ldiskfs",
"path_index": 3
},
{
"backend_filesystem": "ldiskfs",
"path_index": 4
}
],
"success": true,
"lustre_clients": [
{
"nodename": "@HOSTNAME@vm@CLUSTER@2",
"device_paths": [],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@2",
"fqdn": "@HOSTNAME@vm@CLUSTER@2@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@2",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@2",
"host": "@VMHOST@",
"address": "@HOSTNAME@vm@CLUSTER@2@DOMAINNAME@",
"ip_address": "@VM2_IPADDRESS@",
"lnet_address": "@VM2_LNETADDRESS@",
"distro": "el7.5"
}
],
"provision": true,
"repos": {
"chroma": {
"build_number": 511,
"build_job": "manager-for-lustre"
}
},
"hosts": {
"@VMHOST@": {
"cluster_num": @CLUSTER_NUM@,
"nodename": "@VMHOST@@DOMAINNAME@",
"ip_address": "@HOST_IP_ADDRESS@",
"fqdn": "@VMHOST@@DOMAINNAME@",
"address": "@VMHOST@@DOMAINNAME@"
}
},
"failover_is_configured": false,
"filesystem": {
"name": "efs",
"targets": {
"efs-OSTorMDT0003": {
"index": 3,
"kind": "OSTorMDT",
"mount_path": "/mnt/ostORmdt3",
"mount_server": "secondary_server",
"failover_mode": "failnode",
"primary_server": "hydra-2-efs-oss2"
},
"efs-OST0002": {
"index": 2,
"kind": "OST",
"mount_path": "/mnt/ost2",
"mount_server": "primary_server",
"failover_mode": "failnode",
"primary_server": "hydra-2-efs-oss2"
},
"efs-OST0000": {
"index": 0,
"kind": "OST",
"mount_path": "/mnt/ost0",
"mount_server": "primary_server",
"failover_mode": "servicenode",
"primary_server": "hydra-2-efs-oss1"
},
"efs-OST0001": {
"index": 1,
"kind": "OST",
"mount_path": "/mnt/ost1",
"mount_server": "secondary_server",
"failover_mode": "servicenode",
"primary_server": "hydra-2-efs-oss1"
},
"efs-MDT0000": {
"index": 0,
"kind": "MDT",
"mount_path": "/mnt/mdt",
"primary_server": "hydra-2-efs-mgs-mds"
},
"MGS": {
"index": 0,
"kind": "MGT",
"mount_path": "/mnt/mdt",
"primary_server": "hydra-2-efs-mgs-mds"
}
}
},
"test_runners": [
{
"nodename": "@HOSTNAME@vm@CLUSTER@4",
"device_paths": [],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@4",
"fqdn": "@HOSTNAME@vm@CLUSTER@4@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@4",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@4",
"host": "@VMHOST@",
"address": "@HOSTNAME@vm@CLUSTER@4@DOMAINNAME@",
"ip_address": "@VM4_IPADDRESS@",
"distro": "el7.5"
}
],
"managed": false,
"lustre_servers": [
{
"firewall_enabled": false,
"nodename": "@HOSTNAME@vm@CLUSTER@5",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@5",
"fqdn": "@HOSTNAME@vm@CLUSTER@5@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@5",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@5",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"device_type": "linux",
"address": "@HOSTNAME@vm@CLUSTER@5@DOMAINNAME@",
"ip_address": "@VM5_IPADDRESS@",
"lnet_address": "@VM5_LNETADDRESS@",
"distro": "el7.5"
},
{
"firewall_enabled": false,
"nodename": "@HOSTNAME@vm@CLUSTER@6",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@6",
"fqdn": "@HOSTNAME@vm@CLUSTER@6@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@6",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@6",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"device_type": "linux",
"address": "@HOSTNAME@vm@CLUSTER@6@DOMAINNAME@",
"ip_address": "@VM6_IPADDRESS@",
"lnet_address": "@VM6_LNETADDRESS@",
"distro": "el7.5"
},
{
"firewall_enabled": false,
"nodename": "@HOSTNAME@vm@CLUSTER@7",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@7",
"fqdn": "@HOSTNAME@vm@CLUSTER@7@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@7",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@7",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"device_type": "linux",
"address": "@HOSTNAME@vm@CLUSTER@7@DOMAINNAME@",
"ip_address": "@VM7_IPADDRESS@",
"lnet_address": "@VM7_LNETADDRESS@",
"distro": "el7.5"
},
{
"firewall_enabled": false,
"nodename": "@HOSTNAME@vm@CLUSTER@8",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@8",
"fqdn": "@HOSTNAME@vm@CLUSTER@8@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@8",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@8",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"device_type": "linux",
"address": "@HOSTNAME@vm@CLUSTER@8@DOMAINNAME@",
"ip_address": "@VM8_IPADDRESS@",
"lnet_address": "@VM8_LNETADDRESS@",
"distro": "el7.5"
}
],
"test_ha": true,
"chroma_managers": [
{
"server_http_url": "https://@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@/",
"firewall_enabled": true,
"users": [
{
"username": "admin",
"password": "lustre",
"super": true,
"email": "nobody@example.com"
}
],
"nodename": "@HOSTNAME@vm@CLUSTER@3",
"device_paths": [],
"repos": ["chroma"],
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@3",
"fqdn": "@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@3",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@3",
"host": "@VMHOST@",
"selinux_enabled": true,
"address": "@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@",
"ip_address": "@VM3_IPADDRESS@",
"distro": "el7.5"
}
]
}
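
The @TOKEN@ placeholders in this template are filled in before a test run (the jenkins_steps/main script below starts with make substs, which performs the substitution). A minimal, hypothetical sketch of that step; only a few of the tokens are shown, and the values and output file name are illustrative only:

    # hypothetical substitution; the real values come from the Vagrant/libvirt cluster
    sed -e "s/@HOSTNAME@/$(hostname -s)/g" \
        -e "s/@DOMAINNAME@/.local/g" \
        -e "s/@VMHOST@/$(hostname -s)/g" \
        -e "s/@CLUSTER@/0/g" \
        -e "s/@CLUSTER_NUM@/0/g" \
        provisioner_output-vagrant-efs.json.in > provisioner_output-vagrant-efs.json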
@@ -12,6 +12,9 @@ CLUSTER_CONFIG=${CLUSTER_CONFIG:-"$(ls $PWD/existing_filesystem_configuration_cl

# need to remove the chroma repositories configured by the provisioner
pdsh -l root -R ssh -S -w $(spacelist_to_commalist $CHROMA_MANAGER) "exec 2>&1; set -xe
# Clean out any cached yum info; if this is a manually provisioned system rather than an automated one, the underlying repos might have changed.
yum clean all
if $MEASURE_COVERAGE; then
if [ -f /etc/yum.repos.d/autotest.repo ]; then
cat << \"EOF\" >> /etc/yum.repos.d/autotest.repo
@@ -71,37 +74,29 @@ yum -y install pdsh" | dshbak -c

# Install and setup manager
if $JENKINS; then
ARCHIVE_PATH=.
ARCHIVE_PATH=$WORKSPACE
else
ARCHIVE_PATH=chroma-bundles
ARCHIVE_PATH=$CHROMA_DIR/_topdir/RPMS/noarch/
fi

if [ -f ~/storage_server.repo.in ]; then
STORAGE_SERVER_REPO=~/storage_server.repo.in
else
STORAGE_SERVER_REPO=$CHROMA_DIR/storage_server.repo
fi

scp $STORAGE_SERVER_REPO $ARCHIVE_PATH/$ARCHIVE_NAME $CHROMA_DIR/tests/utils/install.exp root@$CHROMA_MANAGER:/tmp
scp $CHROMA_DIR/chroma_support.repo $STORAGE_SERVER_REPO $(ls $ARCHIVE_PATH/$ARCHIVE_NAME) $CHROMA_DIR/tests/utils/install.exp root@$CHROMA_MANAGER:/tmp
ssh root@$CHROMA_MANAGER "# don't do this; for some reason it hangs up the ssh session when used with expect: exec 2>&1
set -ex
yum -y install expect pdsh
mv /tmp/chroma_support.repo /etc/yum.repos.d/
# Install from the installation package
cd /tmp
tar xzvf $ARCHIVE_NAME
cd ${ARCHIVE_NAME%.tar.gz}
# Execute the create_installer script and distribute the install procedure to the storage nodes
./create_installer zfs
./create_installer ldiskfs
if ! pdcp -l root -R ssh -w $(spacelist_to_commalist ${STORAGE_APPLIANCES[@]}) lustre-zfs-${TEST_DISTRO_NAME}${TEST_DISTRO_VERSION%%.*}-installer.tar.gz lustre-ldiskfs-${TEST_DISTRO_NAME}${TEST_DISTRO_VERSION%%.*}-installer.tar.gz $INSTALLER_PATH; then
echo "Failed to copy installer files"
exit 1
fi
if ! expect ../install.exp $CHROMA_USER $CHROMA_EMAIL $CHROMA_PASS ${CHROMA_NTP_SERVER:-localhost}; then
rc=\${PIPESTATUS[0]}
cat /var/log/chroma/install.log
exit \$rc
fi
yum -y install \$(ls $ARCHIVE_NAME)
expect install.exp $CHROMA_USER $CHROMA_EMAIL $CHROMA_PASS ${CHROMA_NTP_SERVER:-localhost}
# override /usr/share/chroma-manager/storage_server.repo
if [ -f /tmp/storage_server.repo.in ]; then
@@ -157,22 +152,7 @@ EOF
fi
$PROXY yum -y install python-setuptools python2-coverage
pushd /tmp/
tar xzvf lustre-ldiskfs-el7-installer.tar.gz
tries=0
failed=true
while \$failed && [ \$tries -lt 10 ]; do
if ! bash -x lustre-ldiskfs/install; then
yum --enablerepo=lustre clean metadata
let tries+=1
else
failed=false
fi
done
if \$failed; then
exit 1
fi
popd
yum -y install kernel-devel-[0-9]\*_lustre lustre-ldiskfs
# Make sure the firewall is not in the way
if firewall-cmd --state; then
@@ -228,7 +208,7 @@ source $CHROMA_DIR/tests/framework/integration/utils/install_client.sh

# Install and setup integration tests
if ! $JENKINS; then
CMIT=$(ls chroma-manager/dist/chroma-manager-integration-tests-*.x86_64.rpm)
CMIT=$(ls $CHROMA_DIR/_topdir/RPMS/noarch/python2-iml-manager-integration-tests-*.noarch.rpm)
fi
scp $CMIT $CLUSTER_CONFIG root@$TEST_RUNNER:/root/
ssh root@$TEST_RUNNER <<EOF
@@ -7,21 +7,15 @@ make substs

set_defaults false
check_for_autopass
if ! $JENKINS; then
CHROMA_DIR="$PWD"
fi
export CLUSTER_CONFIG_TEMPLATE=${CLUSTER_CONFIG_TEMPLATE:-"$CHROMA_DIR/tests/framework/integration/existing_filesystem_configuration/existing_filesystem_configuration_cluster_cfg.json"}

if $JENKINS; then
cd $WORKSPACE
# Copy a fingerprinted file so we can link together the projects in jenkins.
CHROMA_DIR="$PWD"

curl -f -k -O "$JOB_URL/$ARCHIVE_NAME"
fi
export CLUSTER_CONFIG_TEMPLATE=${CLUSTER_CONFIG_TEMPLATE:-"$CHROMA_DIR/tests/framework/integration/existing_filesystem_configuration/existing_filesystem_configuration_cluster_cfg.json"}

cd $WORKSPACE

got_aborted=false
# Gather logs from nodes and release the cluster at exit

trap "set +e; cleanup" EXIT

trap "set -x
@@ -49,11 +43,6 @@ for dne in false true; do
fi

for device_type in ${device_types_to_test[@]}; do
if [ $TEST_DISTRO_VERSION == "7.2" ] && [ $device_type == "linux" ] && ! $test_ha; then
echo "Skipping linux with no HA on el7.2 until it can be fixed"
continue
fi

echo "Beginning automated ${device_type} run..."

export TEST_SPECIFIC_CLUSTER_CONFIG=$PWD/existing_filesystem_configuration_cluster_cfg_${device_type}_HA_is_${test_ha}_dne_is_${dne}.json
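
For reference, the nested dne/device_type loops above generate one TEST_SPECIFIC_CLUSTER_CONFIG file per device type / HA / DNE combination. A self-contained sketch of the resulting file names; the device-type list and test_ha value are assumed for illustration:

    # hypothetical illustration of the config names the loops above produce
    test_ha=true
    device_types_to_test=(linux)
    for dne in false true; do
        for device_type in "${device_types_to_test[@]}"; do
            echo "existing_filesystem_configuration_cluster_cfg_${device_type}_HA_is_${test_ha}_dne_is_${dne}.json"
        done
    done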