Skip to content

Commit

Permalink
Use n1.medium during preference functional tests
Browse files Browse the repository at this point in the history
Given that all preferences now have resource requirements, we need to
actually provide adequate resources to the VirtualMachine.

A negative test is also added for each preference, ensuring that requests
to create a VirtualMachine using an instance type that doesn't provide
enough resources are rejected.

Additionally, the use of virtctl is introduced, removing a long-standing
TODO from both the preference and instance type tests.

Signed-off-by: Lee Yarwood <lyarwood@redhat.com>
  • Loading branch information
lyarwood committed Jun 20, 2023
1 parent 943bfba commit 37dbb7e
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 53 deletions.
74 changes: 23 additions & 51 deletions scripts/functest.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,68 +14,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.

if [ -z "${KUBECTL}" ]; then
echo "${BASH_SOURCE[0]} expects the following env variables to be provided: KUBECTL."
if [ -z "${KUBECTL}" ] || [ -z "${VIRTCTL}" ]; then
echo "${BASH_SOURCE[0]} expects the following env variables to be provided: KUBECTL, VIRTCTL."
exit 1
fi

# Create a custom tiny instance type for negative tests around preference resource requirements
${VIRTCTL} create instancetype --cpu 1 --memory 64Mi --name tiny | ${KUBECTL} apply -f -

# This func test simply loops over the installed instance types and preferences, assigning each to a VirtualMachine to ensure they are accepted by the webhooks
for preference in $(${KUBECTL} get virtualmachineclusterpreferences --no-headers -o custom-columns=':metadata.name'); do
# TODO(lyarwood): Replace with virtctl create vm once 0.59.0 is released
# ${VIRTCTL} create vm --preference ${preference}
${KUBECTL} apply -f - << EOF
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: vm-${preference}
spec:
preference:
name: ${preference}
running: false
template:
spec:
domain:
devices: {}
volumes:
- containerDisk:
image: quay.io/containerdisks/fedora:37
name: containerdisk
EOF
# We can't inline the above creation call below so stash the return code and check it to keep shellcheck happy
ret=$?
if [ $ret -ne 0 ]; then
echo "functest failed on preference ${preference}"

# Ensure a VirtualMachine using a preference with resource requirements is rejected if it does not provide enough resources.
if ${KUBECTL} get virtualmachineclusterpreferences/"${preference}" -o json | jq -er .spec.requirements > /dev/null 2>&1; then
# TODO(lyarwood): virtctl should be extended with a --cpu switch to allow the non instancetype use case to be tested here
if ${VIRTCTL} create vm --instancetype tiny --preference "${preference}" --volume-containerdisk name:disk,src:quay.io/containerdisks/fedora:latest --name "vm-${preference}-requirements" | ${KUBECTL} apply -f - ; then
echo "functest failed - Preference ${preference} should not be able to use virtualmachineclusterinstancetype tiny"
${KUBECTL} delete "vm/vm-${preference}-requirements"
exit 1
fi
fi

# Ensure a VirtualMachine can be created when enough resources are provided using the n1.medium instance type
if ! ${VIRTCTL} create vm --instancetype n1.medium --preference "${preference}" --volume-containerdisk name:disk,src:quay.io/containerdisks/fedora:latest --name "vm-${preference}" | ${KUBECTL} apply -f - ; then
echo "functest failed on preference ${preference} using instancetype n1.medium"
exit 1
fi
${KUBECTL} delete "vm/vm-${preference}"
done

# Cleanup the custom instancetype
${KUBECTL} delete virtualmachineclusterinstancetypes/tiny

for instancetype in $(${KUBECTL} get virtualmachineclusterinstancetypes --no-headers -o custom-columns=':metadata.name'); do
# TODO(lyarwood): Replace with virtctl create vm once 0.59.0 is released
# ${VIRTCTL} create vm --instance-type ${instancetype}
${KUBECTL} apply -f - << EOF
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: vm-${instancetype}
spec:
instancetype:
name: ${instancetype}
running: false
template:
spec:
domain:
devices: {}
volumes:
- containerDisk:
image: quay.io/containerdisks/fedora:37
name: containerdisk
EOF
# We can't inline the above creation call below so stash the return code and check it to keep shellcheck happy
ret=$?
if [ $ret -ne 0 ]; then
if ! ${VIRTCTL} create vm --instancetype "${instancetype}" --volume-containerdisk name:disk,src:quay.io/containerdisks/fedora:latest --name "vm-${instancetype}" | ${KUBECTL} apply -f - ; then
echo "functest failed on instance type ${instancetype}"
exit 1
fi
Expand Down
3 changes: 2 additions & 1 deletion scripts/kubevirt.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ _base_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)
_kubectl="${_base_dir}/_kubevirt/cluster-up/kubectl.sh"
_kubessh="${_base_dir}/_kubevirt/cluster-up/ssh.sh"
_kubevirtcicli="${_base_dir}/_kubevirt/cluster-up/cli.sh"
_virtctl="${_base_dir}/_kubevirt/cluster-up/virtctl.sh"
_action=$1
shift

Expand Down Expand Up @@ -50,7 +51,7 @@ function kubevirt::registry() {
}

function kubevirtci::functest() {
KUBECTL=${_kubectl} "${_base_dir}/scripts/functest.sh"
KUBECTL=${_kubectl} VIRTCTL=${_virtctl} "${_base_dir}/scripts/functest.sh"
}

kubevirt::install
Expand Down
3 changes: 2 additions & 1 deletion scripts/kubevirtci.sh
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ _cluster_up_dir="${_base_dir}/_cluster-up"
_kubectl="${_cluster_up_dir}/cluster-up/kubectl.sh"
_kubessh="${_cluster_up_dir}/cluster-up/ssh.sh"
_kubevirtcicli="${_cluster_up_dir}/cluster-up/cli.sh"
_virtctl="${_cluster_up_dir}/cluster-up/virtctl.sh"
_action=$1
shift

Expand Down Expand Up @@ -71,7 +72,7 @@ function kubevirtci::kubeconfig() {
}

function kubevirtci::functest() {
KUBECTL=${_kubectl} "${_base_dir}/scripts/functest.sh"
KUBECTL=${_kubectl} VIRTCTL=${_virtctl} "${_base_dir}/scripts/functest.sh"
}

kubevirtci::fetch_kubevirtci
Expand Down

0 comments on commit 37dbb7e

Please sign in to comment.