From 3b08e65bfafb53a7e1e58f4a0d72ed1f7c599ae5 Mon Sep 17 00:00:00 2001 From: Romy Date: Thu, 29 Dec 2022 19:18:40 +0200 Subject: [PATCH] first drop delete object Signed-off-by: Romy --- src/native/fs/fs_napi.cpp | 2 +- src/sdk/namespace_fs.js | 365 +++++++- .../unit_tests/test_bucketspace_versioning.js | 806 +++++++++++++++++- src/test/unit_tests/test_namespace_fs.js | 16 +- src/test/unit_tests/test_nsfs_versioning.js | 5 + 5 files changed, 1106 insertions(+), 88 deletions(-) diff --git a/src/native/fs/fs_napi.cpp b/src/native/fs/fs_napi.cpp index d51c08a10a..bdeea95bdf 100644 --- a/src/native/fs/fs_napi.cpp +++ b/src/native/fs/fs_napi.cpp @@ -154,7 +154,7 @@ const static std::map flags_to_case = { }; const static std::vector GPFS_XATTRS{ GPFS_ENCRYPTION_XATTR_NAME }; -const static std::vector USER_XATTRS{ "user.content_md5", "user.version_id", "user.prev_version_id" }; +const static std::vector USER_XATTRS{ "user.content_md5", "user.version_id", "user.prev_version_id", "user.delete_marker"}; struct Entry { diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js index e7172c08ca..89b4cd3ec4 100644 --- a/src/sdk/namespace_fs.js +++ b/src/sdk/namespace_fs.js @@ -1,4 +1,5 @@ /* Copyright (C) 2020 NooBaa */ +/*eslint max-lines: ["error", 2500]*/ 'use strict'; const _ = require('lodash'); @@ -8,6 +9,7 @@ const util = require('util'); const mime = require('mime'); const { v4: uuidv4 } = require('uuid'); const dbg = require('../util/debug_module')(__filename); +const P = require('../util/promise'); const config = require('../../config'); const s3_utils = require('../endpoint/s3/s3_utils'); @@ -38,9 +40,8 @@ const XATTR_USER_PREFIX = 'user.'; const XATTR_MD5_KEY = XATTR_USER_PREFIX + 'content_md5'; const XATTR_VERSION_ID = XATTR_USER_PREFIX + 'version_id'; const XATTR_PREV_VERSION_ID = XATTR_USER_PREFIX + 'prev_version_id'; -//const XATTR_DELETE_MARKER = XATTR_USER_PREFIX + 'delete_marker'; -const HIDDEN_VERSIONS_PATH = '.versions'; const XATTR_DELETE_MARKER = 
XATTR_USER_PREFIX + 'delete_marker'; +const HIDDEN_VERSIONS_PATH = '.versions'; const versioning_status_enum = { VER_ENABLED: 'ENABLED', @@ -550,7 +551,7 @@ class NamespaceFS { try { const fs_context = this.prepare_fs_context(object_sdk); await this._load_bucket(params, fs_context); - const file_path = this._find_version_path(fs_context, params); + const file_path = await this._find_version_path(fs_context, params); return fs.createReadStream(file_path, { highWaterMark: config.NSFS_BUF_SIZE, start: Number.isInteger(params.start) ? params.start : undefined, @@ -809,6 +810,9 @@ class NamespaceFS { const part_upload = file_path === upload_path; const same_inode = params.copy_source && copy_res === copy_status_enum.SAME_INODE; + let stat = await target_file.stat(fs_context); + this._verify_encryption(params.encryption, this._get_encryption_info(stat)); + let fs_xattr; // handle xattr if (!params.copy_source || !params.xattr_copy) { @@ -822,16 +826,14 @@ class NamespaceFS { fs_xattr = this._assign_md5_to_fs_xattr(digest, fs_xattr); } if (!part_upload && this._is_versioning_enabled()) { - fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, file_path, target_file, fs_xattr); + const cur_ver_info = await this._get_version_info(fs_context, file_path); + fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, cur_ver_info, stat, params.key, fs_xattr); } if (fs_xattr) await target_file.replacexattr(fs_context, fs_xattr); } // fsync if (config.NSFS_TRIGGER_FSYNC) await target_file.fsync(fs_context); - dbg.log1('NamespaceFS.upload_stream:', open_mode, file_path, upload_path); - - let stat = await target_file.stat(fs_context); - this._verify_encryption(params.encryption, this._get_encryption_info(stat)); + dbg.log1('NamespaceFS._finish_upload:', open_mode, file_path, upload_path, fs_xattr); if (!same_inode && !part_upload) { await this._move_to_dest(fs_context, upload_path, file_path, target_file, open_mode, { key: params.key }); @@ -849,44 +851,45 @@ class 
NamespaceFS { // 1. safe_link // 2. create tmp file // 2. safe_unlink - async safe_move_posix(fs_context, from_path, to_path, { mtimeNsBigint, ino }) { + async safe_move_posix(fs_context, from_path, to_path, version_info) { // retry safe linking a file in case of parallel put/delete of the source path await this._wrap_safe_op_with_retries( - nb_native().fs.safe_link, - { fs_context, from_path, to_path, mtimeNsBigint, ino }, - config.NSFS_RENAME_RETRIES, + fs_context, + async ({ mtimeNsBigint, ino }) => nb_native().fs.safe_link(fs_context, from_path, to_path, mtimeNsBigint, ino), + Object.assign(version_info || {}, { from_path, to_path }), 'FS::SafeLink ERROR link target doesn\'t match expected inode and mtime' ); - const upload_id = uuidv4(); - const upload_path = path.join(this.bucket_path, this.get_bucket_tmpdir(), 'versions', upload_id); - await this._make_path_dirs(upload_path, fs_context); - // retry safe unlinking a file in case of parallel put/delete of the source path await this._wrap_safe_op_with_retries( - nb_native().fs.safe_unlink, - { fs_context, from_path, to_path: upload_path, mtimeNsBigint, ino }, - config.NSFS_RENAME_RETRIES, + fs_context, + async ({ mtimeNsBigint, ino }) => { + const unique_temp_path = await this._generate_unique_path(fs_context); + await nb_native().fs.safe_unlink(fs_context, from_path, unique_temp_path, mtimeNsBigint, ino); + }, + Object.assign(version_info || {}, { from_path }), 'FS::SafeUnlink ERROR unlink target doesn\'t match expected inode and mtime' ); } - async _wrap_safe_op_with_retries(handler, { fs_context, from_path, to_path, mtimeNsBigint, ino }, retries_num, err_msg) { - let retries = retries_num; + async _wrap_safe_op_with_retries(fs_context, handler, params, retry_err_msg, success_err_codes) { + let retries = config.NSFS_RENAME_RETRIES; + let { from_path, to_path = undefined, mtimeNsBigint, ino } = params; for (;;) { try { dbg.log1('Namespace_fs.wrap_safe_with_retries: ', handler, fs_context, from_path, to_path, 
mtimeNsBigint, ino); - await handler(fs_context, from_path, to_path, mtimeNsBigint, ino); + await handler(params); break; } catch (err) { retries -= 1; if (retries <= 0) throw err; - if (err.message !== err_msg) throw err; + if (err.message !== retry_err_msg) throw err; + if (success_err_codes && success_err_codes.includes(err.code)) return; // stat and extract mtimeNsBigint & ino again const stat = (await nb_native().fs.stat(fs_context, from_path)); - mtimeNsBigint = stat.mtimeNsBigint; - ino = stat.ino; - dbg.warn(`NamespaceFS: Retrying safe_move_posix ${err_msg.split(' ')[0]} retries=${retries}` + + params.mtimeNsBigint = stat.mtimeNsBigint; + params.ino = stat.ino; + dbg.warn(`NamespaceFS: Retrying safe posix ${retry_err_msg.split(' ')[0]} retries=${retries}` + ` from_path=${from_path} to_path=${to_path}`, err); } } @@ -1204,13 +1207,21 @@ class NamespaceFS { try { const fs_context = this.prepare_fs_context(object_sdk); await this._load_bucket(params, fs_context); - // TODO: impl version id and call _find_version_path instead of get_file_path - const file_path = this._get_file_path(params); + const file_path = await this._find_version_path(fs_context, params); await this._check_path_in_bucket_boundaries(fs_context, file_path); dbg.log0('NamespaceFS: delete_object', file_path); - await nb_native().fs.unlink(fs_context, file_path); - await this._delete_path_dirs(file_path, fs_context); - return {}; + let res; + if (this._is_versioning_disabled()) { + await nb_native().fs.unlink(fs_context, file_path); + await this._delete_path_dirs(file_path, fs_context); + } else if (this._is_versioning_enabled()) { + res = params.version_id ? 
+ await this._delete_version_id(fs_context, file_path, params) : + await this._delete_latest_version(fs_context, file_path, params); + } else { + throw new Error('TODO'); + } + return res || {}; } catch (err) { throw this._translate_object_error_codes(err); } @@ -1220,16 +1231,43 @@ class NamespaceFS { try { const fs_context = this.prepare_fs_context(object_sdk); await this._load_bucket(params, fs_context); - for (const { key } of params.objects) { - // TODO: impl version id and call _find_version_path instead of get_file_path - const file_path = this._get_file_path({ key }); - await this._check_path_in_bucket_boundaries(fs_context, file_path); - dbg.log0('NamespaceFS: delete_multiple_objects', file_path); - await nb_native().fs.unlink(fs_context, file_path); - await this._delete_path_dirs(file_path, fs_context); + let res = []; + if (this._is_versioning_disabled()) { + for (const { key, version } of params.objects) { + if (version) { + res.push({}); + continue; + } + try { + const file_path = this._get_file_path({ key }); + await this._check_path_in_bucket_boundaries(fs_context, file_path); + dbg.log0('NamespaceFS: delete_multiple_objects', file_path); + await nb_native().fs.unlink(fs_context, file_path); + await this._delete_path_dirs(file_path, fs_context); + res.push({ key }); + } catch (err) { + res.push({ err_code: err.code, err_message: err.message }); + } + } + } else { + // [{key: a, version: 1}, {key: a, version: 2}, {key:b, version: 1}] => {'a': [1, 2], 'b': [1]} + const versions_by_key_map = {}; + for (const { key, version_id } of params.objects) { + if (versions_by_key_map[key]) versions_by_key_map[key].push(version_id); + else versions_by_key_map[key] = [version_id]; + } + dbg.log3('NamespaceFS: versions_by_key_map', versions_by_key_map); + + if (this._is_versioning_enabled()) { + for (const key of Object.keys(versions_by_key_map)) { + const key_res = await this._delete_objects_versioning_enabled(fs_context, key, versions_by_key_map[key]); + res = 
res.concat(key_res); + } + } else { + throw new Error('TODO'); + } } - // TODO return deletion reponse per key - return params.objects.map(() => ({})); + return res; } catch (err) { throw this._translate_object_error_codes(err); } @@ -1336,13 +1374,15 @@ class NamespaceFS { return fs_xattr; } - async _assign_versions_to_fs_xattr(fs_context, cur_ver_path, new_ver_file, fs_xattr) { - const cur_ver_info = await this._get_version_info(fs_context, cur_ver_path); - const new_ver_stat = await new_ver_file.stat(fs_context); + async _assign_versions_to_fs_xattr(fs_context, prev_ver_info, new_ver_stat, key, fs_xattr, delete_marker) { + if (!prev_ver_info) prev_ver_info = await this.find_max_version_past(fs_context, key); + fs_xattr = Object.assign(fs_xattr || {}, { - [XATTR_VERSION_ID]: this._get_version_id_by_stat(new_ver_stat), - [XATTR_PREV_VERSION_ID]: cur_ver_info && cur_ver_info.version_id_str + [XATTR_VERSION_ID]: this._get_version_id_by_stat(new_ver_stat) }); + if (prev_ver_info) fs_xattr[XATTR_PREV_VERSION_ID] = prev_ver_info.version_id_str; + if (delete_marker) fs_xattr[XATTR_DELETE_MARKER] = delete_marker; + return fs_xattr; } @@ -1698,11 +1738,20 @@ class NamespaceFS { return path.normalize(path.join(this.bucket_path, path.dirname(key), HIDDEN_VERSIONS_PATH, key_version)); } + async _generate_unique_path(fs_context) { + const rand_id = uuidv4(); + const unique_temp_path = path.join(this.bucket_path, this.get_bucket_tmpdir(), 'lost+found', rand_id); + await this._make_path_dirs(unique_temp_path, fs_context); + return unique_temp_path; + } // this function returns the following version information - // version_id_str - mtime-{mtimeNsBigint}-ino-{ino} | explicit null // mtimeNsBigint - modified timestmap in bigint - last time the content of the file was modified // ino - refers to the data stored in a particular location + // delete_marker - specifies if the version is a delete marker + // prev_version_id - specifies the previous version of the wanted version + // 
path - specifies the path to version // if version xattr contains version info - return info by xattr // else - it's a null version - return stat async _get_version_info(fs_context, version_path) { @@ -1712,7 +1761,11 @@ class NamespaceFS { const version_id_str = this._get_version_id_by_xattr(stat); const ver_info_by_xattr = this._extract_version_info_from_xattr(version_id_str); - return { ...(ver_info_by_xattr || stat), version_id_str }; + return { ...(ver_info_by_xattr || stat), version_id_str, + delete_marker: stat.xattr[XATTR_DELETE_MARKER], + prev_version_id: stat.xattr[XATTR_PREV_VERSION_ID], + path: version_path + }; } catch (err) { if (err.code !== 'ENOENT') throw err; dbg.warn('NamespaceFS._get_version_info version doesn\'t exist', err); @@ -1759,6 +1812,226 @@ class NamespaceFS { throw new RpcError('BAD_REQUEST', 'Bad Request'); } } + + // 1. iterate over the key's versions array + // 1.1 if version_id is undefined, delete latest + // 1.2 if version exists - unlink version + // 2. 
try promote second latest to latest if one of the deleted versions is the latest version (with version id specified) or a delete marker + async _delete_objects_versioning_enabled(fs_context, key, versions) { + let res = []; + let deleted_delete_marker = false; + let delete_marker_created = false; + let latest_ver_info; + const latest_version_path = this._get_file_path({ key }); + await this._check_path_in_bucket_boundaries(fs_context, latest_version_path); + for (const version_id of versions) { + try { + if (version_id) { + const file_path = await this._find_version_path(fs_context, { key, version_id }); + await this._check_path_in_bucket_boundaries(fs_context, file_path); + const version_info = await this._get_version_info(fs_context, file_path); + if (!version_info) { + res.push({}); + continue; + } + const deleted_latest = file_path === latest_version_path; + if (deleted_latest) { + await this._wrap_safe_op_with_retries( + fs_context, + async ({ mtimeNsBigint, ino }) => { + const unique_temp_path = await this._generate_unique_path(fs_context); + await nb_native().fs.safe_unlink(fs_context, file_path, unique_temp_path, mtimeNsBigint, ino); + }, + Object.assign(version_info, { from_path: file_path }), + 'FS::SafeUnlink ERROR unlink target doesn\'t match expected inode and mtime', + ['ENOENT'] + ); + latest_ver_info = version_info; + } else { + await nb_native().fs.unlink(fs_context, file_path); + if (!deleted_delete_marker) deleted_delete_marker = version_info.delete_marker; + } + res.push({ deleted_delete_marker: version_info.delete_marker }); + } else { + const version_res = await this._delete_latest_version(fs_context, latest_version_path, { key, version_id }); + res.push(version_res); + delete_marker_created = true; + } + } catch (err) { + res.push({ err_code: err.code, err_message: err.message }); + } + } + // we try promote only if the latest version was deleted or we deleted a delete marker + // and no delete marker added (a new delete marker will be the 
latest - no need to promote) + if ((latest_ver_info || deleted_delete_marker) && !delete_marker_created) { + await this._promote_version_to_latest(fs_context, { key }, latest_ver_info, latest_version_path); + } + // delete .versions/ if it's empty + const file_path = this._get_version_path(key); + await this._delete_path_dirs(file_path, fs_context); + return res; + } + + // delete version_id - + // 1. get version info, if it's empty - return + // 2. unlink key + // 3. promote second latest -> latest + async _delete_version_id(fs_context, file_path, params) { + const version_info = await this._get_version_info(fs_context, file_path); + if (!version_info) return {}; + + const latest_version_path = this._get_file_path({ key: params.key }); + const deleted_latest = file_path === latest_version_path; + + if (deleted_latest) { + // TODO: GPFS call unlinkat with fd verification, no need to do it when it's latest and there is a version to promote + await this._wrap_safe_op_with_retries( + fs_context, + async ({ mtimeNsBigint, ino }) => { + const unique_temp_path = await this._generate_unique_path(fs_context); + await nb_native().fs.safe_unlink(fs_context, file_path, unique_temp_path, mtimeNsBigint, ino); + }, + Object.assign(version_info, { from_path: file_path }), + 'FS::SafeUnlink ERROR unlink target doesn\'t match expected inode and mtime', + ['ENOENT'] + ); + } else { + await nb_native().fs.unlink(fs_context, file_path); + } + // we try promote only if the latest version was deleted or we deleted a delete marker + if (deleted_latest || version_info.delete_marker) { + await this._promote_version_to_latest(fs_context, params, version_info, latest_version_path); + } + await this._delete_path_dirs(file_path, fs_context); + return { + deleted_delete_marker: version_info.delete_marker + }; + } + + // 1. if deleted version is not latest version and not a delete marker - skip + // 2. find max past version + // 2.1. 
if max_past_version does not exist / is a delete marker - skip, nothing to move + // 2.2. else - move max past version -> latest version path + // 3. if deleted version mtime < max_past_version mtime - skip (check if deleted version is latest or latest delete marker in .versions/) + // 4. move max past version -> latest version path + // condition 2 guards on situations where we don't want to try move max version past to latest + async _promote_version_to_latest(fs_context, params, deleted_version_info, latest_ver_path) { + dbg.log1('Namespace_fd._promote_version_to_latest', params, deleted_version_info, latest_ver_path); + const deleted_latest = deleted_version_info && deleted_version_info.path === latest_ver_path; + const prev_version_id = deleted_latest && deleted_version_info.prev_version_id; + + let retries = config.NSFS_RENAME_RETRIES; + for (;;) { + try { + const latest_version_info = await this._get_version_info(fs_context, latest_ver_path); + if (latest_version_info) return; + const max_past_ver_info = (prev_version_id && + (await this.get_prev_version_info(fs_context, params.key, prev_version_id))) || + (await this.find_max_version_past(fs_context, params.key)); + + if (!max_past_ver_info || max_past_ver_info.delete_marker) return; + // 2 - if deleted file is a delete marker and is older than max past version - no need to promote max - return + if (deleted_version_info && + deleted_version_info.delete_marker && + deleted_version_info.mtimeNsBigint < max_past_ver_info.mtimeNsBigint) return; + await this.safe_move_posix(fs_context, max_past_ver_info.path, latest_ver_path, max_past_ver_info); + break; + } catch (err) { + retries -= 1; + if (retries <= 0) throw err; + if (!this._is_gpfs(fs_context) && err.code === 'EEXIST') { + dbg.warn('Namespace_fs._delete_version_id: latest version exist - skipping'); + return; + } + if (err.code !== 'ENOENT') throw err; + dbg.warn(`NamespaceFS: _promote_version_to_latest failed retries=${retries}`, err); + } + } + } + + 
// delete latest version - + // 1. if latest version exist - safe move key .versions/key_{version_id} + // 2. else - latest version is a delete marker / doesn't exist - nothing to move + // 3. create delete marker and move it to .versions/key_{delete_marker_version_id} + async _delete_latest_version(fs_context, file_path, params) { + const version_info = await this._get_version_info(fs_context, file_path); + let to_delete_version_path; + if (version_info) { + to_delete_version_path = this._get_version_path(params.key, version_info.version_id_str); + await this._make_path_dirs(to_delete_version_path, fs_context); + await this.safe_move_posix(fs_context, file_path, to_delete_version_path, version_info); + } + + const created_version_id = await this._create_delete_marker(fs_context, params, version_info); + return { + created_delete_marker: true, + created_version_id + }; + } + + // TODO: support GPFS + async _create_delete_marker(fs_context, params, deleted_version_info) { + let retries = config.NSFS_RENAME_RETRIES; + let upload_params; + let delete_marker_version_id; + for (;;) { + try { + upload_params = await this._start_upload(fs_context, undefined, undefined, params, 'w'); + + // the delete marker path built from its version info (mtime + ino) + const stat = await upload_params.target_file.stat(fs_context); + delete_marker_version_id = this._get_version_id_by_stat(stat); + const file_path = this._get_version_path(params.key, delete_marker_version_id); + + // finish upload part + const fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, deleted_version_info, + stat, params.key, undefined, true); + if (fs_xattr) await upload_params.target_file.replacexattr(fs_context, fs_xattr); + await nb_native().fs.rename(fs_context, upload_params.upload_path, file_path); + return delete_marker_version_id; + } catch (err) { + retries -= 1; + if (retries <= 0) throw err; + if (err.code === 'EEXIST') { + dbg.warn(`NamespaceFS: _create_delete_marker already exists, 
success`, err); + return delete_marker_version_id; + } + dbg.warn(`NamespaceFS: _create_delete_marker failed retries=${retries}`, err); + } finally { + if (upload_params) await this.complete_object_upload_finally(undefined, undefined, upload_params.target_file, fs_context); + } + } + } + + async get_prev_version_info(fs_context, key, prev_version_id) { + const prev_path = this._get_version_path(key, prev_version_id); + const prev_path_info = await this._get_version_info(fs_context, prev_path); + return prev_path_info; + } + + // try find prev version by hint or by iterating on .versions/ dir + async find_max_version_past(fs_context, key) { + const versions_dir = path.normalize(path.join(this.bucket_path, path.dirname(key), HIDDEN_VERSIONS_PATH)); + try { + const versions = await nb_native().fs.readdir(fs_context, versions_dir); + const arr = await P.map_with_concurrency(10, versions, async entry => { + const index = entry.name.endsWith('_null') ? entry.name.lastIndexOf('_null') : entry.name.lastIndexOf('_mtime-'); + // don't fail if version entry name is invalid, just keep searching + if (index < 0 || entry.name.slice(0, index) !== key) return undefined; + const { mtimeNsBigint } = this._extract_version_info_from_xattr(entry.name.slice(key.length + 1)) || + (await this._get_version_info(fs_context, path.join(versions_dir, entry.name))); + return { mtimeNsBigint, name: entry.name }; + }); + + // find max past version by comparing the mtimeNsBigint val + const max_entry_info = arr.reduce((acc, cur) => (cur && cur.mtimeNsBigint > acc.mtimeNsBigint ? 
cur : acc), + { mtimeNsBigint: BigInt(0), name: undefined }); + return max_entry_info.mtimeNsBigint > BigInt(0) && + this._get_version_info(fs_context, path.join(versions_dir, max_entry_info.name)); + } catch (err) { + dbg.warn('namespace_fs.find_max_version_past: .versions/ folder could not be found', err); + } + } } module.exports = NamespaceFS; diff --git a/src/test/unit_tests/test_bucketspace_versioning.js b/src/test/unit_tests/test_bucketspace_versioning.js index d5778b7e01..cfd5ecee13 100644 --- a/src/test/unit_tests/test_bucketspace_versioning.js +++ b/src/test/unit_tests/test_bucketspace_versioning.js @@ -1,5 +1,5 @@ /* Copyright (C) 2020 NooBaa */ -/*eslint max-lines-per-function: ["error", 600]*/ +/*eslint max-lines-per-function: ["error", 1000]*/ 'use strict'; @@ -10,6 +10,7 @@ const assert = require('assert'); const coretest = require('./coretest'); const { rpc_client, EMAIL } = coretest; const fs_utils = require('../../util/fs_utils'); +const size_utils = require('../../util/size_utils'); const path = require('path'); const nb_native = require('../../util/nb_native'); coretest.setup({ pools_to_create: [coretest.POOL_LIST[1]] }); @@ -17,6 +18,7 @@ coretest.setup({ pools_to_create: [coretest.POOL_LIST[1]] }); const MAC_PLATFORM = 'darwin'; const XATTR_VERSION_ID = 'user.version_id'; const XATTR_PREV_VERSION_ID = 'user.prev_version_id'; +const XATTR_DELETE_MARKER = 'user.delete_marker'; const DEFAULT_FS_CONFIG = { uid: process.getuid(), @@ -134,39 +136,38 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { await rpc_client.account.delete_account({ email }); } }); + mocha.describe('put/get versioning', function() { + mocha.it('put object - versioning disabled - to be enabled', async function() { + await s3_uid6.putObject({ Bucket: bucket_name, Key: disabled_key, Body: body1 }).promise(); + }); - mocha.it('put object - versioning disabled - to be enabled', async function() { - await s3_uid6.putObject({ Bucket: bucket_name, Key: 
disabled_key, Body: body1 }).promise(); - }); - - mocha.it('put object - versioning disabled bucket', async function() { - await s3_uid6.putObject({ Bucket: disabled_bucket_name, Key: disabled_key, Body: body1 }).promise(); - }); - - mocha.it('set bucket versioning - Enabled - should fail - no permissions', async function() { - try { - await s3_uid5.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); - assert.fail(`put bucket versioning succeeded for account without permissions`); - } catch (err) { - assert.equal(err.code, 'AccessDenied'); - } - }); + mocha.it('put object - versioning disabled bucket', async function() { + await s3_uid6.putObject({ Bucket: disabled_bucket_name, Key: disabled_key, Body: body1 }).promise(); + }); - mocha.it('set bucket versioning - Enabled - admin - should fail - no permissions', async function() { - try { - await s3_admin.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); - assert.fail(`put bucket versioning succeeded for account without permissions`); - } catch (err) { - assert.equal(err.code, 'AccessDenied'); - } - }); + mocha.it('set bucket versioning - Enabled - should fail - no permissions', async function() { + try { + await s3_uid5.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); + assert.fail(`put bucket versioning succeeded for account without permissions`); + } catch (err) { + assert.equal(err.code, 'AccessDenied'); + } + }); + mocha.it('set bucket versioning - Enabled - admin - should fail - no permissions', async function() { + try { + await s3_admin.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); + assert.fail(`put bucket versioning succeeded for account without permissions`); + } catch (err) { + 
assert.equal(err.code, 'AccessDenied'); + } + }); - mocha.it('set bucket versioning - Enabled', async function() { - await s3_uid6.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); - const res = await s3_uid6.getBucketVersioning({ Bucket: bucket_name }).promise(); - assert.equal(res.Status, 'Enabled'); + mocha.it('set bucket versioning - Enabled', async function() { + await s3_uid6.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); + const res = await s3_uid6.getBucketVersioning({ Bucket: bucket_name }).promise(); + assert.equal(res.Status, 'Enabled'); + }); }); - mocha.describe('versioning enabled', function() { mocha.describe('put object', function() { @@ -346,7 +347,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('copy object - version does not exist - should fail', async function() { try { await s3_uid6.copyObject({ Bucket: bucket_name, Key: copied_key5, - CopySource: `${bucket_name}/${key1}?versionId=123`}).promise(); + CopySource: `${bucket_name}/${key1}?versionId=mtime-123-ino-123`}).promise(); assert.fail('should have failed'); } catch (err) { assert.equal(err.code, 'NoSuchKey'); @@ -378,6 +379,628 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { const comp_res = await compare_version_ids(full_path, key, res.VersionId); assert.ok(comp_res); }); + + + mocha.it('delete object latest - create dm & move latest -> .versions/', async function() { + const prev_version_id = await stat_and_get_version_id(full_path, disabled_key); + const max_version1 = await find_max_version_past(full_path, disabled_key, ''); + const res = await s3_uid6.deleteObject({ Bucket: bucket_name, Key: disabled_key }).promise(); + assert.equal(res.DeleteMarker, true); + + await fs_utils.file_must_not_exist(path.join(full_path, disabled_key)); + const exist = await 
version_file_exists(full_path, disabled_key, '', prev_version_id); + assert.ok(exist); + const max_version2 = await find_max_version_past(full_path, disabled_key, ''); + assert.notEqual(max_version2, max_version1); + const is_dm = await is_delete_marker(full_path, '', disabled_key, max_version2); + assert.ok(is_dm); + assert.equal(res.VersionId, max_version2); + + }); + mocha.it('delete object - create dm & move latest -> .versions/ - 1st', async function() { + const prev_version_id = await stat_and_get_version_id(full_path, key1); + const max_version1 = await find_max_version_past(full_path, key1, ''); + const res = await s3_uid6.deleteObject({ Bucket: bucket_name, Key: key1 }).promise(); + assert.equal(res.DeleteMarker, true); + + await fs_utils.file_must_not_exist(path.join(full_path, key1)); + const exist = await version_file_exists(full_path, key1, '', prev_version_id); + assert.ok(exist); + const max_version2 = await find_max_version_past(full_path, key1, ''); + assert.notEqual(max_version2, max_version1); + const is_dm = await is_delete_marker(full_path, '', key1, max_version2); + assert.ok(is_dm); + assert.equal(res.VersionId, max_version2); + + }); + + mocha.it('delete object - create dm & move latest -> .versions/ - 2nd time', async function() { + const max_version1 = await find_max_version_past(full_path, key1, ''); + const res = await s3_uid6.deleteObject({ Bucket: bucket_name, Key: key1 }).promise(); + assert.equal(res.DeleteMarker, true); + + await fs_utils.file_must_not_exist(path.join(full_path, key1)); + const exist = await version_file_exists(full_path, key1, '', max_version1); + assert.ok(exist); + const max_version2 = await find_max_version_past(full_path, key1, ''); + assert.notEqual(max_version2, max_version1); + assert.equal(max_version2, res.VersionId); + const is_dm = await is_delete_marker(full_path, '', key1, max_version2); + assert.ok(is_dm); + }); + }); + + mocha.describe('delete object', function() { + const 
delete_object_test_bucket_reg = 'delete-object-test-bucket-reg'; + const delete_object_test_bucket_null = 'delete-object-test-bucket-null'; + const delete_object_test_bucket_dm = 'delete-object-test-bucket-dm'; + + const full_delete_path = tmp_fs_root + '/' + delete_object_test_bucket_reg; + const full_delete_path_null = tmp_fs_root + '/' + delete_object_test_bucket_null; + const full_delete_path_dm = tmp_fs_root + '/' + delete_object_test_bucket_dm; + + let account_with_access; + mocha.describe('delete object - versioning enabled', function() { + mocha.describe('delete object - regular version - versioning enabled', async function() { + mocha.before(async function() { + const res = await generate_nsfs_account({ default_resource: nsr }); + account_with_access = generate_s3_client(res.access_key, res.secret_key); + await account_with_access.createBucket({ Bucket: delete_object_test_bucket_reg }).promise(); + await put_allow_all_bucket_policy(s3_admin, delete_object_test_bucket_reg); + await account_with_access.createBucket({ Bucket: delete_object_test_bucket_null }).promise(); + await put_allow_all_bucket_policy(s3_admin, delete_object_test_bucket_null); + await account_with_access.createBucket({ Bucket: delete_object_test_bucket_dm }).promise(); + await put_allow_all_bucket_policy(s3_admin, delete_object_test_bucket_dm); + }); + + mocha.it('delete version id - fake id - should fail with NoSuchKey', async function() { + const max_version1 = await find_max_version_past(full_path, key1, ''); + try { + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, Key: key1, VersionId: 'mtime-123-ino-123'}).promise(); + assert.fail('delete object should have failed on ENOENT'); + } catch (err) { + assert.equal(err.code, 'NoSuchKey'); + } + const max_version2 = await find_max_version_past(full_path, key1, ''); + assert.equal(max_version1, max_version2); + }); + + mocha.it('delete object version id - latest - second latest is null version', async 
function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_reg, key1, ['null', 'regular']); + const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: upload_res_arr[1].VersionId }).promise(); + assert.equal(delete_res.VersionId, cur_version_id1); + + const cur_version_id2 = await stat_and_get_version_id(full_delete_path, key1); + assert.notEqual(cur_version_id1, cur_version_id2); + assert.equal('null', cur_version_id2); + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1 + '_' + upload_res_arr[1].VersionId)); + const max_version1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, undefined); + await delete_object_versions(full_delete_path, key1); + }); + + mocha.it('delete object version id - latest - second latest is delete marker version ', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_reg, key1, ['regular', 'delete_marker', 'regular']); + const max_version0 = await find_max_version_past(full_delete_path, key1, ''); + const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1); + assert.equal(upload_res_arr[2].VersionId, cur_version_id1); + const cur_ver_info = await stat_and_get_all(full_delete_path, key1); + assert.equal(cur_ver_info.xattr[XATTR_PREV_VERSION_ID], max_version0); + const is_dm = await is_delete_marker(full_delete_path, '', key1, max_version0); + assert.ok(is_dm); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: upload_res_arr[2].VersionId }).promise(); + assert.equal(delete_res.VersionId, cur_version_id1); + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1)); + const max_version1 = await 
find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, max_version0); + await delete_object_versions(full_delete_path, key1); + }); + + mocha.it('delete object version id - in .versions/', async function() { + const put_res = await account_with_access.putObject({ + Bucket: delete_object_test_bucket_reg, Key: key1, Body: body1 }).promise(); + await account_with_access.putObject({ Bucket: delete_object_test_bucket_reg, Key: key1, Body: body1 }).promise(); + const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1); + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: put_res.VersionId }).promise(); + assert.equal(put_res.VersionId, delete_res.VersionId); + const cur_version_id2 = await stat_and_get_version_id(full_delete_path, key1); + assert.equal(cur_version_id1, cur_version_id2); + const exist = await version_file_must_not_exists(full_delete_path, key1, '', put_res.VersionId); + assert.ok(exist); + const max_version1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, undefined); + }); + + mocha.it('delete object version id - latest - no second latest', async function() { + const cur_version_id = await stat_and_get_version_id(full_delete_path, key1); + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: cur_version_id }).promise(); + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1 + '_' + cur_version_id)); + const max_version1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, undefined); + }); + + mocha.it('delete object version id - in .versions/ 2 - latest exist and it\'s a regular version', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_reg, key1, ['regular', 'regular', 'regular']); + + const cur_version_id1 = await 
stat_and_get_version_id(full_delete_path, key1); + assert.equal(cur_version_id1, upload_res_arr[2].VersionId); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: upload_res_arr[1].VersionId }).promise(); + assert.equal(upload_res_arr[1].VersionId, delete_res.VersionId); + const cur_version_id2 = await stat_and_get_version_id(full_delete_path, key1); + assert.equal(cur_version_id1, cur_version_id2); + const exist = await version_file_must_not_exists(full_delete_path, key1, '', upload_res_arr[1].VersionId); + assert.ok(exist); + const max_version1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, upload_res_arr[0].VersionId); + await delete_object_versions(full_delete_path, key1); + }); + + mocha.it('delete object version id - in .versions/ 3 - latest exist and it\'s a delete marker', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_reg, key1, ['regular', 'regular', 'delete_marker']); + + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1)); + const latest_dm_version_id1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(latest_dm_version_id1, upload_res_arr[2].VersionId); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: upload_res_arr[1].VersionId }).promise(); + assert.equal(upload_res_arr[1].VersionId, delete_res.VersionId); + + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1)); + const latest_dm_version_id2 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(latest_dm_version_id1, latest_dm_version_id2); + const version_deleted = await version_file_must_not_exists(full_delete_path, key1, '', upload_res_arr[1].VersionId); + assert.ok(version_deleted); + await delete_object_versions(full_delete_path, key1); + }); + + 
mocha.it('delete object version id - latest - second latest is regular version ', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_reg, key1, ['regular', 'regular']); + const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_reg, + Key: key1, VersionId: upload_res_arr[1].VersionId }).promise(); + + const cur_version_id2 = await stat_and_get_version_id(full_delete_path, key1); + + assert.notEqual(cur_version_id1, cur_version_id2); + assert.equal(upload_res_arr[0].VersionId, cur_version_id2); + await fs_utils.file_must_not_exist(path.join(full_delete_path, key1 + '_' + upload_res_arr[1].VersionId)); + const max_version1 = await find_max_version_past(full_delete_path, key1, ''); + assert.equal(max_version1, undefined); + await delete_object_versions(full_delete_path, key1); + }); + + mocha.it('delete object version null - latest, no second latest', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_null, key1, ['null']); + const cur_version_id1 = await stat_and_get_version_id(full_delete_path_null, key1); + assert.equal(upload_res_arr[0].VersionId, undefined); + assert.equal(cur_version_id1, 'null'); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_null, + Key: key1, VersionId: 'null' }).promise(); + + await fs_utils.file_must_not_exist(path.join(full_delete_path_null, key1)); + const max_version1 = await find_max_version_past(full_delete_path_null, key1, ''); + assert.equal(max_version1, undefined); + await delete_object_versions(full_delete_path_null, key1); + }); + + mocha.it('delete object version null - version is in .versions/', async function() { + const upload_res_arr = await upload_object_versions(account_with_access, delete_object_test_bucket_null, key1, ['null', 'regular']); + const 
cur_version_id1 = await stat_and_get_version_id(full_delete_path_null, key1); + assert.equal(upload_res_arr[0].VersionId, undefined); + assert.notEqual(cur_version_id1, 'null'); + assert.equal(cur_version_id1, upload_res_arr[1].VersionId); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_null, + Key: key1, VersionId: 'null' }).promise(); + + const max_version1 = await find_max_version_past(full_delete_path_null, key1, ''); + assert.equal(max_version1, undefined); + const cur_version_id2 = await stat_and_get_version_id(full_delete_path_null, key1); + assert.equal(cur_version_id1, cur_version_id2); + + await delete_object_versions(full_delete_path_null, key1); + }); + + mocha.it('delete object version delete marker - latest - second latest is a null version', async function() { + await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['null', 'delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + const second_max_version1 = await find_max_version_past(full_delete_path_dm, key1, '', [max_version]); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: max_version }).promise(); + + assert.equal(delete_res.DeleteMarker, true); + assert.equal(delete_res.VersionId, max_version); + + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(max_version1, undefined); + await fs_utils.file_must_exist(path.join(full_delete_path_dm, key1)); + await version_file_must_not_exists(full_delete_path_dm, key1, '', second_max_version1); + const new_latest_ver_id = await stat_and_get_version_id(full_delete_path_dm, key1); + assert.equal(new_latest_ver_id, 'null'); + + await delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - non latest', 
async function() { + await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['regular', 'delete_marker', 'delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + const second_max_version1 = await find_max_version_past(full_delete_path_dm, key1, '', [max_version]); + + const delete_res = await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: second_max_version1 }).promise(); + + assert.equal(delete_res.DeleteMarker, true); + assert.equal(delete_res.VersionId, second_max_version1); + + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + await version_file_must_not_exists(full_delete_path, key1, '', second_max_version1); + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(max_version1, max_version); + + await delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - latest - second latest is a delete marker', async function() { + await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['regular', 'delete_marker', 'delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + const second_max_version1 = await find_max_version_past(full_delete_path_dm, key1, '', [max_version]); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: max_version }).promise(); + + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + await version_file_exists(full_delete_path_dm, key1, '', second_max_version1); + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(max_version1, second_max_version1); + + await 
delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - latest - second latest is a regular version', async function() { + const put_res = await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['regular', 'regular', 'delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + const second_max_version1 = await find_max_version_past(full_delete_path_dm, key1, '', [max_version]); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: max_version }).promise(); + + await fs_utils.file_must_exist(path.join(full_delete_path_dm, key1)); + await version_file_must_not_exists(full_delete_path_dm, key1, '', second_max_version1); + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.notEqual(max_version1, second_max_version1); + assert.equal(put_res[0].VersionId, max_version1); + + await delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - latest - no second latest', async function() { + const put_res = await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(put_res[0].VersionId, max_version); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: max_version }).promise(); + + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + await version_file_must_not_exists(full_delete_path_dm, key1, '', max_version); + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(max_version1, undefined); + await 
delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - in .versions/ - latest exist', async function() { + const put_res = await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['regular', 'delete_marker', 'regular']); + const ltst_version_id1 = await stat_and_get_version_id(full_delete_path_dm, key1); + const max_version = await find_max_version_past(full_delete_path_dm, key1, ''); + const second_max_version1 = await find_max_version_past(full_delete_path_dm, key1, '', [max_version]); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: max_version }).promise(); + + await fs_utils.file_must_exist(path.join(full_delete_path_dm, key1)); + const ltst_version_id2 = await stat_and_get_version_id(full_delete_path_dm, key1); + assert.equal(ltst_version_id1, ltst_version_id2); + await version_file_exists(full_delete_path_dm, key1, '', second_max_version1); + const max_version1 = await find_max_version_past(full_delete_path_dm, key1, ''); + assert.equal(max_version1, second_max_version1); + assert.equal(put_res[0].VersionId, max_version1); + + await delete_object_versions(full_delete_path_dm, key1); + }); + + mocha.it('delete object version delete marker - in .versions/ - latest is a delete marker', async function() { + const put_res = await upload_object_versions(account_with_access, delete_object_test_bucket_dm, key1, ['regular', 'delete_marker', 'delete_marker']); + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const latest_dm1 = await find_max_version_past(full_delete_path_dm, key1, ''); + + await account_with_access.deleteObject({ Bucket: delete_object_test_bucket_dm, + Key: key1, VersionId: put_res[1].VersionId }).promise(); + + await fs_utils.file_must_not_exist(path.join(full_delete_path_dm, key1)); + const latest_dm2 = await find_max_version_past(full_delete_path_dm, key1, ''); + 
assert.equal(latest_dm1, latest_dm2); + await delete_object_versions(full_delete_path_dm, key1); + }); + }); + }); +}); + mocha.describe('delete multiple objects', function() { + const delete_multi_object_test_bucket = 'delete-multi-object-test-bucket'; + const full_multi_delete_path = tmp_fs_root + '/' + delete_multi_object_test_bucket; + let account_with_access; + + mocha.before(async function() { + const res = await generate_nsfs_account({ default_resource: nsr }); + account_with_access = generate_s3_client(res.access_key, res.secret_key); + await account_with_access.createBucket({ Bucket: delete_multi_object_test_bucket }).promise(); + await put_allow_all_bucket_policy(s3_admin, delete_multi_object_test_bucket); + }); + + mocha.it('delete multiple objects - no version id - versioning disabled', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(80000); + let keys = []; + for (let i = 0; i < 50; i++) { + let random_key = (Math.random() + 1).toString(36).substring(7); + keys.push(random_key); + await upload_object_versions(account_with_access, delete_multi_object_test_bucket, random_key, ['null']); + } + const to_delete_arr = keys.map(key => ({ Key: key })); + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: to_delete_arr } }).promise(); + assert.equal(delete_res.Deleted.length, 50); + assert.deepStrictEqual(delete_res.Deleted, to_delete_arr); + for (let res of delete_res.Deleted) { + assert.equal(res.DeleteMarker, undefined); + assert.equal(res.VersionId, undefined); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + await fs_utils.file_must_not_exist(versions_dir); + let objects = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, full_multi_delete_path); + assert.equal(objects.length, 1); + assert.ok(objects[0].name.startsWith('.noobaa-nsfs_')); + + }); + + mocha.it('delete multiple objects - no version id', async 
function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + let versions_type_arr = ['null']; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); + } + await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 0; i < 200; i++) { + arr.push({ Key: 'a' }); + } + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 200); + for (let res of delete_res.Deleted) { + assert.equal(res.DeleteMarker, true); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + assert.equal(versions.length, 501); + await delete_object_versions(full_multi_delete_path, key1); + await delete_object_versions(full_multi_delete_path, 'a'); + }); + + mocha.it('delete multiple objects - delete only delete markers', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 0; i < 300; i++) { + if (i % 2 === 1) arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + } + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 150); + for (let res of delete_res.Deleted) { + assert.equal(res.DeleteMarker, true); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + assert.equal(versions.length, 149); + await fs_utils.file_must_exist(path.join(full_multi_delete_path, key1)); + let latest_stat = await stat_and_get_all(full_multi_delete_path, key1); + assert.equal(latest_stat.xattr[XATTR_VERSION_ID], put_res[298].VersionId); + await delete_object_versions(full_multi_delete_path, key1); + }); + + mocha.it('delete multiple objects - delete only regular versions key1, delete delete markers key2', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + const key2 = 'key2'; + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let put_res2 = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key2, versions_type_arr); + let arr = []; + for (let i = 0; i < 300; i++) { + if (i % 2 === 0) arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + if (i % 2 === 1) arr.push({ Key: key2, VersionId: put_res2[i].VersionId }); + } + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 300); + for (let res of delete_res.Deleted.slice(0, 150)) { + assert.equal(res.DeleteMarker, undefined); + } + for (let res of delete_res.Deleted.slice(150)) { + assert.equal(res.DeleteMarker, true); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + // 150 of key1 and 149 of key2 (latest version of key2 is in the parent dir) + assert.equal(versions.length, 299); + await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); + await fs_utils.file_must_exist(path.join(full_multi_delete_path, key2)); + let latest_dm_version = await find_max_version_past(full_multi_delete_path, key1); + const version_path = path.join(full_multi_delete_path, '.versions', key1 + '_' + latest_dm_version); + const version_info = await stat_and_get_all(version_path, ''); + assert.equal(version_info.xattr[XATTR_DELETE_MARKER], 'true'); + assert.equal(version_info.xattr[XATTR_VERSION_ID], put_res[299].VersionId); + await delete_object_versions(full_multi_delete_path, key1); + await delete_object_versions(full_multi_delete_path, key2); + }); + + mocha.it('delete multiple objects - delete regular versions & delete markers - new latest is dm', async function() { + const self = this; // eslint-disable-line 
no-invalid-this + self.timeout(60000); + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 200; i < 300; i++) { + arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + } + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 100); + for (let i = 0; i < 100; i++) { + if (i % 2 === 1) assert.equal(delete_res.Deleted[i].DeleteMarker, true); + if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + assert.equal(versions.length, 200); + await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); + let latest_dm_version = await find_max_version_past(full_multi_delete_path, key1); + const version_path = path.join(full_multi_delete_path, '.versions', key1 + '_' + latest_dm_version); + const version_info = await stat_and_get_all(version_path, ''); + assert.equal(version_info.xattr[XATTR_VERSION_ID], put_res[199].VersionId); + await delete_object_versions(full_multi_delete_path, key1); + }); + + mocha.it('delete multiple objects - delete regular versions & delete markers - new latest is regular version', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 100; i < 200; i++) { + arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + } + arr.push({ Key: key1, VersionId: put_res[299].VersionId }); + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 101); + for (let i = 0; i < 100; i++) { + if (i % 2 === 1) assert.equal(delete_res.Deleted[i].DeleteMarker, true); + if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + + assert.equal(versions.length, 198); + await fs_utils.file_must_exist(path.join(full_multi_delete_path, key1)); + let latest_stat = await stat_and_get_all(full_multi_delete_path, key1); + assert.equal(latest_stat.xattr[XATTR_VERSION_ID], put_res[298].VersionId); + await delete_object_versions(full_multi_delete_path, key1); + }); + + mocha.it('delete multiple objects - delete keys & regular versions & delete markers ', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 0; i < 50; i++) { + arr.push({ Key: key1 }); + } + for (let i = 100; i < 200; i++) { + arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + } + + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 150); + for (let i = 0; i < 50; i++) { + assert.notEqual(delete_res.Deleted[i].DeleteMarkerVersionId, undefined); + assert.equal(delete_res.Deleted[i].DeleteMarker, true); + } + for (let i = 50; i < 150; i++) { + if (i % 2 === 1) assert.equal(delete_res.Deleted[i].DeleteMarker, true); + if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + + assert.equal(versions.length, 250); + await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); + await delete_object_versions(full_multi_delete_path, key1); + }); + + + mocha.it('delete multiple objects - delete regular versions & delete markers & latest & keys- ', async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(60000); + let versions_type_arr = []; + for (let i = 0; i < 300; i++) { + versions_type_arr.push(i % 2 === 1 ? 
'regular' : 'delete_marker'); + } + let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + let arr = []; + for (let i = 200; i < 300; i++) { + arr.push({ Key: key1, VersionId: put_res[i].VersionId }); + } + + for (let i = 0; i < 50; i++) { + arr.push({ Key: key1 }); + } + const delete_res = await account_with_access.deleteObjects({ + Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); + assert.equal(delete_res.Deleted.length, 150); + for (let i = 0; i < 100; i++) { + if (i % 2 === 1) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); + if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, true); + } + for (let i = 100; i < 150; i++) { + assert.notEqual(delete_res.Deleted[i].DeleteMarkerVersionId, undefined); + assert.equal(delete_res.Deleted[i].DeleteMarker, true); + } + const versions_dir = path.join(full_multi_delete_path, '.versions'); + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + + assert.equal(versions.length, 250); + await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); + await delete_object_versions(full_multi_delete_path, key1); + }); }); }); @@ -385,28 +1008,123 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { /////// UTILS /////// +async function delete_object_versions(bucket_path, key) { + // delete past versions + const versions_dir = path.join(bucket_path, '.versions'); + try { + let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + + for (const entry of versions) { + if (entry.name.startsWith(key)) { + await fs_utils.file_delete(path.join(versions_dir, entry.name)); + } + } + } catch (err) { + console.log('delete_object_versions: .versions is missing'); + } + // delete latest version + await fs_utils.file_delete(path.join(bucket_path, key)); +} + +async function upload_object_versions(s3_client, bucket, key, object_types_arr) {
+ let res = []; + const versioning_status = await s3_client.getBucketVersioning({ Bucket: bucket }).promise(); + for (const obj_type of object_types_arr) { + if (obj_type === 'regular' || obj_type === 'null') { + if (!versioning_status.Status && obj_type === 'regular') { + await s3_client.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); + } + const random_body = (Math.random() + 1).toString(36).substring(7); + const put_res = await s3_client.putObject({ Bucket: bucket, Key: key, Body: random_body }).promise(); + res.push(put_res); + } else if (obj_type === 'delete_marker') { + if (!versioning_status.Status) { + await s3_client.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }).promise(); + } + const delete_res = await s3_client.deleteObject({ Bucket: bucket, Key: key }).promise(); + res.push(delete_res); + } + } + return res; +} +// add the prev xattr optimization +async function find_max_version_past(full_path, key, dir, skip_list) { + const versions_dir = path.join(full_path, dir || '', '.versions'); + try { + //let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + let max_mtime_nsec = 0; + let max_path; + const versions = (await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir)).filter(entry => { + const index = entry.name.endsWith('_null') ? entry.name.lastIndexOf('_null') : + entry.name.lastIndexOf('_mtime-'); + // don't fail if version entry name is invalid, just keep searching + return index > 0 && entry.name.slice(0, index) === key; + }); + for (const entry of versions) { + if (skip_list ? 
!skip_list.includes(entry.name.slice(key.length + 1)) : true) { + const version_str = entry.name.slice(key.length + 1); + const { mtimeNsBigint } = _extract_version_info_from_xattr(version_str) || + (await nb_native().fs.stat(DEFAULT_FS_CONFIG, path.join(versions_dir, entry.name))); + + if (mtimeNsBigint > max_mtime_nsec) { + max_mtime_nsec = mtimeNsBigint; + max_path = entry.name; + } + } + } + return max_path && max_path.slice(key.length + 1); + } catch (err) { + console.log('find_max_version_past: .versions is missing', err); + } +} + +function _extract_version_info_from_xattr(version_id_str) { + if (version_id_str === 'null') return; + const arr = version_id_str.split('mtime-').join('').split('-ino-'); + if (arr.length < 2) throw new Error('Invalid version_id_string, cannot extract attributes'); + return { mtimeNsBigint: size_utils.string_to_bigint(arr[0], 36), ino: parseInt(arr[1], 36) }; +} + + async function version_file_exists(full_path, key, dir, version_id) { const version_path = path.join(full_path, dir, '.versions', key + '_' + version_id); await fs_utils.file_must_exist(version_path); return true; } +async function version_file_must_not_exists(full_path, key, dir, version_id) { + const version_path = path.join(full_path, dir, '.versions', key + '_' + version_id); + await fs_utils.file_must_not_exist(version_path); + return true; +} + async function get_obj_and_compare_data(s3, bucket_name, key, expected_body) { const get_res = await s3.getObject({ Bucket: bucket_name, Key: key }).promise(); assert.equal(get_res.Body.toString(), expected_body); return true; } +async function is_delete_marker(full_path, dir, key, version) { + const version_path = path.join(full_path, dir, '.versions', key + '_' + version); + const stat = await nb_native().fs.stat(DEFAULT_FS_CONFIG, version_path); + return stat && stat.xattr[XATTR_DELETE_MARKER]; +} + async function stat_and_get_version_id(full_path, key) { const key_path = path.join(full_path, key); const stat = await
nb_native().fs.stat(DEFAULT_FS_CONFIG, key_path); return get_version_id_by_xattr(stat); } +async function stat_and_get_all(full_path, key) { + const key_path = path.join(full_path, key); + const stat = await nb_native().fs.stat(DEFAULT_FS_CONFIG, key_path); + return stat; +} + async function compare_version_ids(full_path, key, put_result_version_id, prev_version_id) { const key_path = path.join(full_path, key); const stat = await nb_native().fs.stat(DEFAULT_FS_CONFIG, key_path); - console.log('STAT: ', stat); const new_version_id = get_version_id_by_stat(stat); const xattr_version_id = get_version_id_by_xattr(stat); assert.equal(new_version_id, put_result_version_id); @@ -427,6 +1145,25 @@ function get_version_id_by_xattr(stat, prev) { return (stat && stat.xattr[XATTR_VERSION_ID]) || 'null'; } +async function put_allow_all_bucket_policy(s3_client, bucket) { + const policy = { + Version: '2012-10-17', + Statement: [{ + Sid: 'id-1', + Effect: 'Allow', + Principal: { AWS: "*" }, + Action: ['s3:*'], + Resource: [`arn:aws:s3:::*`] + } + ] + }; + // create accounts + await s3_client.putBucketPolicy({ + Bucket: bucket, + Policy: JSON.stringify(policy) + }).promise(); +} + function generate_s3_client(access_key, secret_key) { return new AWS.S3({ s3ForcePathStyle: true, @@ -442,7 +1179,7 @@ function generate_s3_client(access_key, secret_key) { } async function generate_nsfs_account(options = {}) { - const { uid, gid, new_buckets_path, nsfs_only, admin } = options; + const { uid, gid, new_buckets_path, nsfs_only, admin, default_resource } = options; if (admin) { const account = await rpc_client.account.read_account({ email: EMAIL, @@ -465,7 +1202,8 @@ async function generate_nsfs_account(options = {}) { s3_access: true, email: `${random_name}@noobaa.com`, name: random_name, - nsfs_account_config + nsfs_account_config, + default_resource }); return { access_key: account.access_keys[0].access_key.unwrap(), diff --git a/src/test/unit_tests/test_namespace_fs.js 
b/src/test/unit_tests/test_namespace_fs.js index a0f04a2178..625f78e1b8 100644 --- a/src/test/unit_tests/test_namespace_fs.js +++ b/src/test/unit_tests/test_namespace_fs.js @@ -233,18 +233,18 @@ mocha.describe('namespace_fs', function() { }); mocha.describe('Get/Head object', function() { - const nsr = 'versioned-nsr'; - const bucket_name = 'versioned-bucket'; - const disabled_bucket_name = 'disabled-bucket'; - let tmp_fs_root = '/tmp/test_namespace_fs'; + const nsr = 'get-head-versioned-nsr'; + const bucket_name = 'get-head-versioned-bucket'; + const disabled_bucket_name = 'get-head-disabled-bucket'; + let tmp_fs_root = '/tmp/test_namespace_fs_get_head'; if (process.platform === MAC_PLATFORM) { tmp_fs_root = '/private/' + tmp_fs_root; } - const bucket_path = '/bucket'; + const bucket_path = '/get-head-bucket'; const vesion_dir = '/.versions'; const full_path = tmp_fs_root + bucket_path; - const disabled_bucket_path = '/disabled_bucket'; + const disabled_bucket_path = '/get-head-disabled_bucket'; const disabled_full_path = tmp_fs_root + disabled_bucket_path; const version_dir_path = full_path + vesion_dir; let file_pointer; @@ -263,6 +263,8 @@ mocha.describe('namespace_fs', function() { const key_version = en_version_key + '_' + en_version_key_v1; mocha.before(async function() { + const self = this; // eslint-disable-line no-invalid-this + self.timeout(300000); if (process.getgid() !== 0 || process.getuid() !== 0) { console.log('No Root permissions found in env. 
Skipping test'); this.skip(); // eslint-disable-line no-invalid-this @@ -338,7 +340,7 @@ mocha.describe('namespace_fs', function() { }); mocha.after(async () => { - //fs_utils.folder_delete(tmp_fs_root); + await fs_utils.folder_delete(tmp_fs_root); for (let email of accounts) { await rpc_client.account.delete_account({ email }); } diff --git a/src/test/unit_tests/test_nsfs_versioning.js b/src/test/unit_tests/test_nsfs_versioning.js index cec33411d8..de9b6ba483 100644 --- a/src/test/unit_tests/test_nsfs_versioning.js +++ b/src/test/unit_tests/test_nsfs_versioning.js @@ -12,6 +12,7 @@ const crypto = require('crypto'); const buffer_utils = require('../../util/buffer_utils'); const util = require('util'); const path = require('path'); +const fs = require('fs'); const MAC_PLATFORM = 'darwin'; @@ -94,6 +95,7 @@ mocha.describe('namespace_fs - versioning', function() { const from_path = path.join(ns_tmp_bucket_path, file_key); const to_path = path.join(ns_tmp_bucket_path, file_key + '_mtime-1-ino-2'); const fake_mtime_ino = { mtimeNsBigint: BigInt(0), ino: 0 }; + const stat1 = await fs.promises.stat(from_path); const upload_res = await ns_tmp.safe_move_posix( dummy_object_sdk.requesting_account.nsfs_account_config, from_path, @@ -101,6 +103,9 @@ fake_mtime_ino ); console.log('upload_object response', util.inspect(upload_res)); + const stat2 = await fs.promises.stat(to_path); + assert.equal(stat1.ino, stat2.ino); + await fs_utils.file_must_not_exist(from_path); }); mocha.it('safe move posix - Enabled - should fail', async function() {