feat(backups/cleanVm): detect and fix cache inconsistencies (#6575)
julien-f committed Dec 7, 2022
1 parent a5daba2 commit 6973928
Showing 3 changed files with 54 additions and 25 deletions.
12 changes: 6 additions & 6 deletions @xen-orchestra/backups/RemoteAdapter.js
@@ -508,7 +508,7 @@ class RemoteAdapter {
     return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
   }
 
-  async #readCache(path) {
+  async _readCache(path) {
     try {
       return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
     } catch (error) {
@@ -521,15 +521,15 @@ class RemoteAdapter {
   _updateCache = synchronized.withKey()(this._updateCache)
   // eslint-disable-next-line no-dupe-class-members
   async _updateCache(path, fn) {
-    const cache = await this.#readCache(path)
+    const cache = await this._readCache(path)
     if (cache !== undefined) {
       fn(cache)
 
-      await this.#writeCache(path, cache)
+      await this._writeCache(path, cache)
     }
   }
 
-  async #writeCache(path, data) {
+  async _writeCache(path, data) {
     try {
       await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
     } catch (error) {
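Note the `synchronized.withKey()` wrapper above: it serializes concurrent `_updateCache` calls that target the same cache file, so two read-modify-write cycles cannot interleave and silently drop an update. A minimal sketch of the idea — a hypothetical helper, not the actual library implementation:

```js
// Queue calls per key (here: the cache path) so each read-modify-write
// completes before the next one on the same file starts.
// (A real implementation would also clean up settled queues.)
function withKey(fn) {
  const queues = new Map()
  return function (key, ...args) {
    const previous = queues.get(key) ?? Promise.resolve()
    const result = previous.then(() => fn.call(this, key, ...args))
    // keep the chain alive even when a call rejects
    queues.set(key, result.catch(() => {}))
    return result
  }
}
```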
@@ -577,7 +577,7 @@ class RemoteAdapter {
   async _readCacheListVmBackups(vmUuid) {
     const path = this.#getVmBackupsCache(vmUuid)
 
-    const cache = await this.#readCache(path)
+    const cache = await this._readCache(path)
     if (cache !== undefined) {
       debug('found VM backups cache, using it', { path })
       return cache
@@ -590,7 +590,7 @@ class RemoteAdapter {
     }
 
     // detached async action, will not reject
-    this.#writeCache(path, backups)
+    this._writeCache(path, backups)
 
     return backups
   }
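A likely reason for renaming `#readCache`/`#writeCache` to `_readCache`/`_writeCache`: `cleanVm` lives in a separate file (`_cleanVm.js`, below) and is invoked with the adapter as `this`, and JavaScript `#`-private members can only be referenced lexically inside the class body that declares them. A minimal sketch of the constraint, with hypothetical names:

```js
class Adapter {
  async _readCache(path) {
    // the underscore prefix is a convention only: callable from anywhere
    return { path }
  }
}

// defined in another module and invoked with an Adapter instance as `this`
async function cleanVm(path) {
  // `this.#readCache(path)` would be a SyntaxError here: #-private members
  // are only visible inside the class body that declares them
  return this._readCache(path)
}

cleanVm.call(new Adapter(), 'xo-vm-backups/example/cache.json.gz').then(console.log)
```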
64 changes: 45 additions & 19 deletions @xen-orchestra/backups/_cleanVm.js
@@ -311,7 +311,6 @@ exports.cleanVm = async function cleanVm(
   }
 
   const jsons = new Set()
-  let mustInvalidateCache = false
   const xvas = new Set()
   const xvaSums = []
   const entries = await handler.list(vmDir, {
@@ -327,6 +326,20 @@
     }
   })
 
+  const cachePath = vmDir + '/cache.json.gz'
+
+  let mustRegenerateCache
+  {
+    const cache = await this._readCache(cachePath)
+    const actual = cache === undefined ? 0 : Object.keys(cache).length
+    const expected = jsons.size
+
+    mustRegenerateCache = actual !== expected
+    if (mustRegenerateCache) {
+      logWarn('unexpected number of entries in backup cache', { path: cachePath, actual, expected })
+    }
+  }
+
   await asyncMap(xvas, async path => {
     // check is not good enough to delete the file, the best we can do is report
     // it
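The detection above is a simple cardinality check: the cache is a JSON object keyed by metadata path, so comparing its entry count with the number of `*.json` metadata files found on disk catches most inconsistencies (it would miss the rare case of an equal number of stale and missing entries). A self-contained illustration, with made-up paths:

```js
// cache as read from cache.json.gz: one entry per known backup
const cache = {
  'xo-vm-backups/example/20221206T000000Z.json': { mode: 'delta' },
}
// metadata files actually listed in the VM directory
const jsons = new Set([
  'xo-vm-backups/example/20221206T000000Z.json',
  'xo-vm-backups/example/20221207T000000Z.json',
])

const actual = Object.keys(cache).length // 1
const expected = jsons.size // 2
console.log(actual !== expected) // true → the cache must be regenerated
```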
@@ -338,6 +351,8 @@
   const unusedVhds = new Set(vhds)
   const unusedXvas = new Set(xvas)
 
+  const backups = new Map()
+
   // compile the list of unused XVAs and VHDs, and remove backup metadata which
   // reference a missing XVA/VHD
   await asyncMap(jsons, async json => {
@@ -350,19 +365,16 @@
       return
     }
 
+    let isBackupComplete
+
     const { mode } = metadata
     if (mode === 'full') {
       const linkedXva = resolve('/', vmDir, metadata.xva)
-      if (xvas.has(linkedXva)) {
+      isBackupComplete = xvas.has(linkedXva)
+      if (isBackupComplete) {
         unusedXvas.delete(linkedXva)
       } else {
         logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
-        if (remove) {
-          logInfo('deleting incomplete backup', { path: json })
-          jsons.delete(json)
-          mustInvalidateCache = true
-          await handler.unlink(json)
-        }
       }
     } else if (mode === 'delta') {
       const linkedVhds = (() => {
@@ -371,22 +383,28 @@
       })()
 
       const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
+      isBackupComplete = missingVhds.length === 0
 
       // FIXME: find better approach by keeping as much of the backup as
       // possible (existing disks) even if one disk is missing
-      if (missingVhds.length === 0) {
+      if (isBackupComplete) {
         linkedVhds.forEach(_ => unusedVhds.delete(_))
         linkedVhds.forEach(path => {
           vhdsToJSons[path] = json
         })
       } else {
         logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
-        if (remove) {
-          logInfo('deleting incomplete backup', { path: json })
-          mustInvalidateCache = true
-          jsons.delete(json)
-          await handler.unlink(json)
-        }
       }
     }
+
+    if (isBackupComplete) {
+      backups.set(json, metadata)
+    } else {
+      jsons.delete(json)
+      if (remove) {
+        logInfo('deleting incomplete backup', { backup: json })
+        mustRegenerateCache = true
+        await handler.unlink(json)
+      }
+    }
   })
@@ -496,7 +514,7 @@ exports.cleanVm = async function cleanVm(
   // check for the other that the size is the same as the real file size
 
   await asyncMap(jsons, async metadataPath => {
-    const metadata = JSON.parse(await handler.readFile(metadataPath))
+    const metadata = backups.get(metadataPath)
 
     let fileSystemSize
     const merged = metadataWithMergedVhd[metadataPath] !== undefined
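Because the first pass now stores every complete backup's parsed metadata in the `backups` Map, this size-check pass can replace a second read-and-parse of each file with a lookup, and the same Map later feeds the cache regeneration. A rough sketch of the pattern, using an in-memory stand-in for the file reads:

```js
// hypothetical stand-in for reading each *.json metadata file from storage
const files = {
  'xo-vm-backups/example/20221206T000000Z.json': '{"mode":"delta","size":1024}',
}

// first pass: parse each metadata file once and keep the result
const backups = new Map()
for (const [path, raw] of Object.entries(files)) {
  backups.set(path, JSON.parse(raw))
}

// later passes reuse the parsed objects instead of hitting storage again
for (const [path, metadata] of backups) {
  console.log(path, metadata.size) // no second read + JSON.parse
}
```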
@@ -538,6 +556,7 @@
     // systematically update size after a merge
     if ((merged || fixMetadata) && size !== fileSystemSize) {
       metadata.size = fileSystemSize
+      mustRegenerateCache = true
       try {
         await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
       } catch (error) {
@@ -546,9 +565,16 @@
       }
     }
   })
 
-  // purge cache if a metadata file has been deleted
-  if (mustInvalidateCache) {
-    await handler.unlink(vmDir + '/cache.json.gz')
+  if (mustRegenerateCache) {
+    const cache = {}
+    for (const [path, content] of backups.entries()) {
+      cache[path] = {
+        _filename: path,
+        id: path,
+        ...content,
+      }
+    }
+    await this._writeCache(cachePath, cache)
   }
 
   return {
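Regeneration replaces the old strategy of simply deleting the cache file: instead of forcing every later listing to re-scan the whole directory, `cleanVm` now rebuilds the full cache from the complete backups it has already parsed and writes it back gzipped. A hedged sketch of that final step, where `handler` stands in for xen-orchestra's remote filesystem abstraction:

```js
const zlib = require('zlib')
const { promisify } = require('util')
const gzip = promisify(zlib.gzip)

// rebuild the cache object from the surviving backups and persist it,
// mirroring the shape used in the diff: metadata plus _filename and id
async function regenerateCache(handler, cachePath, backups /* Map<string, object> */) {
  const cache = {}
  for (const [path, content] of backups.entries()) {
    cache[path] = { _filename: path, id: path, ...content }
  }
  await handler.writeFile(cachePath, await gzip(JSON.stringify(cache)), { flags: 'w' })
}
```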
3 changes: 3 additions & 0 deletions CHANGELOG.unreleased.md
@@ -7,6 +7,8 @@
 
 > Users must be able to say: “Nice enhancement, I'm eager to test it”
 
+- [Backups] Automatically detect, report and fix cache inconsistencies
+
 ### Bug fixes
 
 > Users must be able to say: “I had this issue, happy to know it's fixed”
@@ -29,5 +31,6 @@
 <!--packages-start-->
 
+- @xen-orchestra/backups patch
 - xo-server: patch
 <!--packages-end-->
 