Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(backups): update VM backups cache #6411

Merged
merged 5 commits into from
Sep 10, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
113 changes: 84 additions & 29 deletions @xen-orchestra/backups/RemoteAdapter.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ const zlib = require('zlib')

const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
Expand All @@ -34,7 +35,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS

const { warn } = createLogger('xo:backups:RemoteAdapter')
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

const compareTimestamp = (a, b) => a.timestamp - b.timestamp

Expand Down Expand Up @@ -224,11 +225,30 @@ class RemoteAdapter {
return promise
}

#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}

async deleteDeltaVmBackups(backups) {
const handler = this._handler

// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))

this.#removeVmBackupsFromCache(backups)
}

async deleteMetadataBackup(backupId) {
Expand Down Expand Up @@ -256,6 +276,8 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)

this.#removeVmBackupsFromCache(backups)
}

deleteVmBackup(file) {
Expand All @@ -281,9 +303,6 @@ class RemoteAdapter {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, logWarn: warn })
}

const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}

#getCompressionType() {
Expand Down Expand Up @@ -458,11 +477,41 @@ class RemoteAdapter {
return backupsByPool
}

async _invalidateVmBackupListCacheDir(vmDir) {
await this.handler.unlink(`${vmDir}/cache.json.gz`)
// Path of the gzipped JSON file caching the backup list of the given VM.
#getVmBackupsCache(vmUuid) {
  return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}

async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}

// Serialize concurrent updates of the same cache file: `synchronized.withKey`
// queues calls that share the same first argument (the cache path).
_updateCache = synchronized.withKey()(this._updateCache)
// Apply `fn` (a synchronous mutator) to the decoded content of the cache
// file at `path`, then write the mutated content back.
//
// If the cache is missing or unreadable (`#readCache` returned undefined),
// nothing is written: the cache will be regenerated from the metadata files
// on the next full listing instead of being patched.
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
  const cache = await this.#readCache(path)
  if (cache !== undefined) {
    fn(cache)

    await this.#writeCache(path, cache)
  }
}

async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
}

// Delete the given VM's backup list cache file, forcing the next listing to
// regenerate it from the metadata files.
// NOTE(review): behavior when the cache file is absent depends on
// `handler.unlink` ENOENT semantics — confirm callers tolerate a rejection.
async invalidateVmBackupListCache(vmUuid) {
  await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
}

async #getCachabledDataListVmBackups(dir) {
Expand Down Expand Up @@ -501,41 +550,25 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return

async _readCacheListVmBackups(vmUuid) {
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const path = this.#getVmBackupsCache(vmUuid)

try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
const cache = await this.#readCache(path)
if (cache !== undefined) {
return cache
}

// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(dir)
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
if (backups === undefined) {
return
}

// detached async action, will not reject
this.#writeVmBackupsCache(path, backups)
this.#writeCache(path, backups)

return backups
}

// Serialize `backups` as gzipped JSON and (over)write it to `cacheFile`.
//
// Best effort: a failure is only logged as a warning and never propagates,
// since the cache can be regenerated from the backup metadata files.
async #writeVmBackupsCache(cacheFile, backups) {
  try {
    const text = JSON.stringify(backups)
    const zipped = await fromCallback(zlib.gzip, text)
    await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
  } catch (error) {
    warn('writeVmBackupsCache', { cacheFile, error })
  }
}

async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
Expand Down Expand Up @@ -574,6 +607,28 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}

async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`

await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})

// will not throw
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,

// these values are required in the cache
_filename: path,
id: path,
}
})

return path
}

async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler

Expand Down
3 changes: 1 addition & 2 deletions @xen-orchestra/backups/_cleanVm.js
Original file line number Diff line number Diff line change
Expand Up @@ -533,8 +533,7 @@ exports.cleanVm = async function cleanVm(

// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
// cleanvm is always invoked as a method of RemoteAdapter
await this._invalidateVmBackupListCacheDir(vmDir)
await handler.unlink(vmDir + '/cache.json.gz')
}

return {
Expand Down
5 changes: 1 addition & 4 deletions @xen-orchestra/backups/writers/DeltaBackupWriter.js
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)

const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
Expand Down Expand Up @@ -223,9 +222,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)

// TODO: run cleanup?
}
Expand Down
7 changes: 2 additions & 5 deletions @xen-orchestra/backups/writers/FullBackupWriter.js
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup

const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)

// TODO: clean VM backup directory
Expand All @@ -50,7 +49,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const dataBasename = basename + '.xva'
const dataFilename = backupDir + '/' + dataBasename

const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
Expand All @@ -74,9 +73,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)

if (!deleteFirst) {
await deleteOldBackups()
Expand Down
1 change: 0 additions & 1 deletion @xen-orchestra/backups/writers/_MixinBackupWriter.js
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}

healthCheck(sr) {
Expand Down
2 changes: 2 additions & 0 deletions CHANGELOG.unreleased.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@

> Users must be able to say: “Nice enhancement, I'm eager to test it”

- [Backup] Improve listing speed by updating caches instead of regenerating them on backup creation/deletion (PR [#6411](https://github.com/vatesfr/xen-orchestra/pull/6411))

### Bug fixes

> Users must be able to say: “I had this issue, happy to know it's fixed”
Expand Down