Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/gh-pages-pr-workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,8 @@ jobs:
working-directory: ./docs/
- name: install requirements
run: |
pip install -r test-requirements.txt
pip install --no-deps -e${{github.workspace}}/apiclient
pip install -r test-requirements.txt --require-hashes
- name: Run pdoc3
run: |
pdoc --html -o . .
Expand Down
11 changes: 5 additions & 6 deletions harvester_e2e_tests/integrations/test_1_volumes.py
Original file line number Diff line number Diff line change
Expand Up @@ -668,16 +668,15 @@ def test_create_volume_snapshot_and_restore(
7. Delete source volume, the snapshot should be deleted
Ref. https://github.com/harvester/harvester/issues/2294
"""
image_id = None
if source_type == "VM Image":
image_id = ubuntu_image['id']
unique_name = f"{unique_name}-image"
spec = api_client.volumes.Spec.for_image(ubuntu_image['info'])
else:
spec = api_client.volumes.Spec("10Gi")

# Create a volume from source_type(Empty or VM Image)
spec = api_client.volumes.Spec("10Gi", storage_cls=None)
kws = dict()
code, data = api_client.volumes.create(unique_name, spec, image_id=image_id, **kws)
assert 201 == code, (code, unique_name, data, image_id)
code, data = api_client.volumes.create(unique_name, spec, **kws)
assert 201 == code, (code, unique_name, data, spec.to_dict(unique_name, 'default'))

# Wait the volume become ready
volumes_ready, (code, data) = volume_checker.wait_volumes_ready([unique_name])
Expand Down
98 changes: 98 additions & 0 deletions harvester_e2e_tests/integrations/test_4_vm_snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -597,3 +597,101 @@ def test_volume_snapshots_are_cleaned_up_after_source_volume_deleted(self, api_c
sleep(1)
else:
raise AssertionError(f"timed out waiting for {volumesnapshotname} to be deleted")


@pytest.mark.p0
@pytest.mark.virtualmachines
@pytest.mark.volumes
class TestVolumeSnapshotWithVM:
    """Integration tests covering volume snapshot/restore for a volume attached to a VM."""

    def test_snapshot_of_volume_on_vm(self, api_client,
                                      source_vm, vm_checker,
                                      volume_checker, wait_timeout,
                                      host_shell, vm_shell,
                                      ssh_keypair, polling_for):
        """
        1. Create a VM with volume
        2. Write some data on the VM
        3. Create snapshot of the volume of the VM
        4. Restore the snapshot to a new volume
        5. Create new VM and use the restored volume
        6. Check the data in the new VM
        Ref. https://github.com/harvester/harvester/issues/2294
        """
        vm_name, ssh_user = source_vm

        _, data = api_client.vms.get(vm_name)

        # The first volume in the VM spec is the disk we snapshot; its PVC
        # claimName is the volume name used by the volumes API below.
        vol_name = (data["spec"]["template"]["spec"]["volumes"][0]
                    ['persistentVolumeClaim']['claimName'])

        vm_started, (code, vmi) = vm_checker.wait_started(vm_name)
        assert vm_started, (code, vmi)

        # Write 123 into test.txt, then sync so the data hits the disk
        # before the snapshot is taken.
        def action(sh):
            _, _ = sh.exec_command("echo 123 > test.txt")  # nosec B601
            _, _ = sh.exec_command("sync")  # nosec B601

        vm_shell_do(vm_name, api_client,
                    host_shell, vm_shell,
                    ssh_user, ssh_keypair,
                    action, wait_timeout)

        # Create a snapshot of the volume.
        # Names are truncated to 60 chars so the derived names (with the
        # "-snapshot"/"-restore"/"-vm" suffixes) stay within the k8s
        # 63-character name limit.
        snapshot_name = f"{vol_name[:60]}-snapshot"
        code, _ = api_client.volumes.snapshot(vol_name, snapshot_name)
        assert 204 == code, (f"Failed to create snapshot of volume {vol_name}")

        # Wait for the snapshot to become ready
        endtime = datetime.now() + timedelta(seconds=wait_timeout)
        while endtime > datetime.now():
            code, data = api_client.vol_snapshots.get(snapshot_name)
            if code == 200 and data.get('status', {}).get('readyToUse', False):
                break
            sleep(3)
        else:
            raise AssertionError(
                f"Timeout in waiting the snapshot {snapshot_name} to be ready.\n"
                f"Last API Status({code}): {data}"
            )

        # Restore the snapshot into a new volume
        restore_vol_name = f"{snapshot_name[:60]}-restore"
        code, _ = api_client.vol_snapshots.restore(snapshot_name, restore_vol_name)
        assert 204 == code, (
            f"Failed to restore snapshot {snapshot_name} to volume {restore_vol_name}")

        # Wait for the restored volume to become ready and verify its
        # dataSource points back at the snapshot it was restored from.
        volumes_ready, (code, data) = volume_checker.wait_volumes_ready([restore_vol_name])
        assert volumes_ready, (code, data)
        data_source = data.get('spec', {}).get('dataSource', {}).get('name')
        assert snapshot_name == data_source, (
            f"Data source mismatch. Expected {snapshot_name}, got {data_source}")

        # Create a new VM backed by the restored volume and wait until it runs
        vm_with_restore_vol_name = f"{restore_vol_name[:60]}-vm"
        vm_spec = api_client.vms.Spec(1, 2)
        vm_spec.add_existing_volume(vm_with_restore_vol_name, restore_vol_name)
        code, data = api_client.vms.create(vm_with_restore_vol_name, vm_spec)
        assert 201 == code, f"Fail to create VM\n{code}, {data}"
        code, data = polling_for(
            "VM do created",
            lambda c, d: 200 == c and d.get('status', {}).get('printableStatus') == "Running",
            api_client.vms.get, vm_with_restore_vol_name
        )

        # Check that the data written before the snapshot survived the
        # snapshot/restore round trip.
        def actassert(sh):
            out, _ = sh.exec_command("cat test.txt")  # nosec B601
            assert "123" in out

        vm_shell_do(vm_with_restore_vol_name, api_client,
                    host_shell, vm_shell,
                    ssh_user, ssh_keypair,
                    actassert, wait_timeout)

        # Teardown: delete the new VM, then the restored volume
        vm_deleted, (code, data) = vm_checker.wait_deleted(vm_with_restore_vol_name)
        assert vm_deleted, (code, data)
        volumes_deleted, (code, data) = volume_checker.wait_volumes_deleted([restore_vol_name])
        assert volumes_deleted, (code, data)
Loading