diff --git a/src/cmd/manage_nsfs.js b/src/cmd/manage_nsfs.js
index 64a8343bf1..b9f94fd561 100644
--- a/src/cmd/manage_nsfs.js
+++ b/src/cmd/manage_nsfs.js
@@ -249,8 +249,8 @@ async function delete_bucket(data, force) {
     const fs_context_fs_backend = native_fs_utils.get_process_fs_context(data.fs_backend);
     const bucket_config_path = get_config_file_path(buckets_dir_path, data.name);
     try {
-        const temp_dir_name = config.NSFS_TEMP_DIR_NAME + "_" + data._id;
-        const bucket_temp_dir_path = path.join(data.path, temp_dir_name);
+        const temp_dir_name = native_fs_utils.get_bucket_tmpdir_name(data._id);
+        const bucket_temp_dir_path = native_fs_utils.get_bucket_tmpdir_full_path(data.path, data._id);
         const entries = await nb_native().fs.readdir(fs_context_fs_backend, data.path);
         const object_entries = entries.filter(element => !element.name.endsWith(temp_dir_name));
         if (object_entries.length === 0 || force) {
diff --git a/src/sdk/bucketspace_fs.js b/src/sdk/bucketspace_fs.js
index b5dadaf0b8..3d2a8aac90 100644
--- a/src/sdk/bucketspace_fs.js
+++ b/src/sdk/bucketspace_fs.js
@@ -20,7 +20,8 @@
 const mongo_utils = require('../util/mongo_utils');
 const { CONFIG_SUBDIRS } = require('../manage_nsfs/manage_nsfs_constants');
 const KeysSemaphore = require('../util/keys_semaphore');
-const native_fs_utils = require('../util/native_fs_utils');
+const { get_umasked_mode, isDirectory, validate_bucket_creation,
+    create_config_file, delete_config_file, get_bucket_tmpdir_full_path, folder_delete } = require('../util/native_fs_utils');
 const NoobaaEvent = require('../manage_nsfs/manage_nsfs_events_utils').NoobaaEvent;
 const dbg = require('../util/debug_module')(__filename);
 
@@ -197,7 +198,6 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
     // BUCKET //
     ////////////
 
-    //TODO: we need to add pagination support to list buckets for more than 1000 buckets.
     /**
      * list_buckets will read all bucket config files, and filter them according to the requesting account's
      * permissions
@@ -224,7 +224,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
         const account = object_sdk.requesting_account;
         const buckets = await P.map_with_concurrency(10, entries, async entry => {
-            if (native_fs_utils.isDirectory(entry) || !entry.name.endsWith('.json')) {
+            if (isDirectory(entry) || !entry.name.endsWith('.json')) {
                 return;
             }
             const bucket_name = this.get_bucket_name(entry.name);
@@ -258,7 +258,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             throw new RpcError('MISSING_NSFS_ACCOUNT_CONFIGURATION');
         }
         const fs_context = prepare_fs_context(sdk);
-        native_fs_utils.validate_bucket_creation(params);
+        validate_bucket_creation(params);
 
         const { name } = params;
         const bucket_config_path = this._get_bucket_config_path(name);
@@ -283,7 +283,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             const bucket_to_validate = JSON.parse(bucket_config);
             dbg.log2("create_bucket: bucket properties before validate_bucket_schema", bucket_to_validate);
             nsfs_schema_utils.validate_bucket_schema(bucket_to_validate);
-            await native_fs_utils.create_config_file(this.fs_context, this.bucket_schema_dir, bucket_config_path, bucket_config);
+            await create_config_file(this.fs_context, this.bucket_schema_dir, bucket_config_path, bucket_config);
         } catch (err) {
             new NoobaaEvent(NoobaaEvent.BUCKET_CREATION_FAILED).create_event(name, {bucket_name: name}, err);
             throw this._translate_bucket_error_codes(err);
@@ -291,7 +291,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
 
         // create bucket's underlying storage directory
         try {
-            await nb_native().fs.mkdir(fs_context, bucket_storage_path, native_fs_utils.get_umasked_mode(config.BASE_MODE_DIR));
+            await nb_native().fs.mkdir(fs_context, bucket_storage_path, get_umasked_mode(config.BASE_MODE_DIR));
             new NoobaaEvent(NoobaaEvent.BUCKET_CREATED).create_event(name, {bucket_name: name});
         } catch (err) {
             dbg.error('BucketSpaceFS: create_bucket could not create underlying directory - nsfs, deleting bucket', err);
@@ -323,41 +323,44 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
         };
     }
-
+    /**
+     * delete_bucket deletes the bucket config file, and the bucket's underlying storage directory or tmpdir, based on the requesting account's permissions
+     * 1. if bucket.should_create_underlying_storage is true - delete the underlying storage directory (the bucket's FS directory in which the objects are stored)
+     * 2. else - verify the bucket contains no objects (throw NOT_EMPTY if it does), then delete only the bucket's tmpdir
+     * 3. delete the bucket config file
+     * @param {nb.ObjectSDK} object_sdk
+     * @returns {Promise}
+     */
     async delete_bucket(params, object_sdk) {
-        return bucket_semaphore.surround_key(String(params.name), async () => {
-            const { name } = params;
-            const bucket_path = this._get_bucket_config_path(name);
+        const { name } = params;
+        return bucket_semaphore.surround_key(String(name), async () => {
+            const bucket_config_path = this._get_bucket_config_path(name);
             try {
-                const namespace_bucket_config = await object_sdk.read_bucket_sdk_namespace_info(params.name);
+                const { ns, bucket } = await object_sdk.read_bucket_full_info(name);
+                const namespace_bucket_config = bucket && bucket.namespace;
                 dbg.log1('BucketSpaceFS.delete_bucket: namespace_bucket_config', namespace_bucket_config);
-                const ns = await object_sdk._get_bucket_namespace(params.name);
-                if (namespace_bucket_config && namespace_bucket_config.should_create_underlying_storage) {
-                    // delete underlying storage = the directory which represents the bucket
+                if (!namespace_bucket_config) throw new RpcError('INTERNAL_ERROR', 'Invalid Bucket configuration');
+
+                if (namespace_bucket_config.should_create_underlying_storage) {
+                    // 1. delete underlying storage
                     dbg.log1('BucketSpaceFS.delete_bucket: deleting uls', this.fs_root, namespace_bucket_config.write_resource.path);
-                    await ns.delete_uls({
-                        name,
-                        full_path: path.join(this.fs_root, namespace_bucket_config.write_resource.path) // includes write_resource.path + bucket name (s3 flow)
-                    }, object_sdk);
-                } else if (namespace_bucket_config) {
-                    // S3 Delete for NSFS Manage buckets
+                    const bucket_storage_path = path.join(this.fs_root, namespace_bucket_config.write_resource.path); // includes write_resource.path + bucket name (s3 flow)
+                    await ns.delete_uls({ name, full_path: bucket_storage_path }, object_sdk);
+                } else {
+                    // 2. delete only bucket tmpdir
                     const list = await ns.list_objects({ ...params, limit: 1 }, object_sdk);
-                    if (list && list.objects && list.objects.length > 0) {
-                        throw new RpcError('NOT_EMPTY', 'underlying directory has files in it');
-                    }
-                    const bucket = await object_sdk.read_bucket_sdk_config_info(params.name);
-                    const bucket_temp_dir_path = path.join(namespace_bucket_config.write_resource.path,
-                        config.NSFS_TEMP_DIR_NAME + "_" + bucket._id);
-                    await native_fs_utils.folder_delete(bucket_temp_dir_path, this.fs_context, true);
+                    if (list && list.objects && list.objects.length > 0) throw new RpcError('NOT_EMPTY', 'underlying directory has files in it');
+                    const bucket_tmpdir_path = get_bucket_tmpdir_full_path(namespace_bucket_config.write_resource.path, bucket._id);
+                    await folder_delete(bucket_tmpdir_path, this.fs_context, true);
                 }
-                dbg.log1(`BucketSpaceFS: delete_fs_bucket ${bucket_path}`);
-                // delete bucket config json file
-                await native_fs_utils.delete_config_file(this.fs_context, this.bucket_schema_dir, bucket_path);
-                new NoobaaEvent(NoobaaEvent.BUCKET_DELETE).create_event(name, {bucket_name: name});
+                // 3. delete bucket config json file
+                dbg.log1(`BucketSpaceFS: delete_bucket: deleting config file ${bucket_config_path}`);
+                await delete_config_file(this.fs_context, this.bucket_schema_dir, bucket_config_path);
+                new NoobaaEvent(NoobaaEvent.BUCKET_DELETE).create_event(name, { bucket_name: name });
             } catch (err) {
-                new NoobaaEvent(NoobaaEvent.BUCKET_DELETE_FAILED).create_event(params.name,
-                    {bucket_name: params.name, bucket_path: bucket_path}, err);
-                dbg.error('BucketSpaceFS: delete_bucket error', err);
+                dbg.error('BucketSpaceFS: delete_bucket: error', err);
+                new NoobaaEvent(NoobaaEvent.BUCKET_DELETE_FAILED).create_event(name,
+                    { bucket_name: name, bucket_path: bucket_config_path }, err);
                 throw this._translate_bucket_error_codes(err);
             }
         });
@@ -402,7 +405,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
                 this.fs_context,
                 bucket_config_path,
                 Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
+                    mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
                 }
             );
         } catch (err) {
@@ -447,9 +450,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
@@ -484,9 +486,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
@@ -513,9 +514,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
@@ -537,15 +537,18 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
         }
     }
 
+    /**
+     * @param {object} params
+     * @returns {Promise}
+     */
     async get_bucket_website(params) {
         try {
             const { name } = params;
@@ -553,7 +556,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             const bucket_config_path = this._get_bucket_config_path(name);
             const { data } = await nb_native().fs.readFile(this.fs_context, bucket_config_path);
             const bucket = JSON.parse(data.toString());
-            return {website: bucket.website};
+            return { website: bucket.website };
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
         }
@@ -582,9 +585,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
@@ -606,9 +608,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             await nb_native().fs.writeFile(
                 this.fs_context,
                 bucket_config_path,
-                Buffer.from(update_bucket), {
-                    mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_CONFIG_FILE)
-                }
+                Buffer.from(update_bucket),
+                { mode: get_umasked_mode(config.BASE_MODE_CONFIG_FILE) }
             );
         } catch (err) {
             throw this._translate_bucket_error_codes(err);
diff --git a/src/sdk/bucketspace_simple_fs.js b/src/sdk/bucketspace_simple_fs.js
index 970bdea2d0..ec0d10f664 100644
--- a/src/sdk/bucketspace_simple_fs.js
+++ b/src/sdk/bucketspace_simple_fs.js
@@ -120,7 +120,12 @@ class BucketSpaceSimpleFS {
         }
     }
 
-    async delete_bucket(params) {
+    /**
+     * @param {object} params
+     * @param {nb.ObjectSDK} object_sdk
+     * @returns {Promise}
+     */
+    async delete_bucket(params, object_sdk) {
         try {
             const { name } = params;
             const bucket_path = path.join(this.fs_root, name);
@@ -203,6 +208,10 @@ class BucketSpaceSimpleFS {
         // TODO
     }
 
+    /**
+     * @param {object} params
+     * @returns {Promise}
+     */
     async get_bucket_website(params) {
         // TODO
     }
@@ -236,6 +245,23 @@ class BucketSpaceSimpleFS {
     async put_object_lock_configuration(params, object_sdk) {
         // TODO
     }
+
+
+    /////////////////////
+    // BUCKET LOGGING  //
+    /////////////////////
+
+    async put_bucket_logging(params) {
+        // TODO
+    }
+
+    async delete_bucket_logging(params) {
+        // TODO
+    }
+
+    async get_bucket_logging(params) {
+        // TODO
+    }
 }
 
 module.exports = BucketSpaceSimpleFS;
diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js
index 1701129d13..fe2d05ed96 100644
--- a/src/sdk/namespace_fs.js
+++ b/src/sdk/namespace_fs.js
@@ -491,8 +491,18 @@ class NamespaceFS {
         return fs_context;
     }
 
-    get_bucket_tmpdir() {
-        return config.NSFS_TEMP_DIR_NAME + '_' + this.bucket_id;
+    /**
+     * @returns {string}
+     */
+    get_bucket_tmpdir_name() {
+        return native_fs_utils.get_bucket_tmpdir_name(this.bucket_id);
+    }
+
+    /**
+     * @returns {string}
+     */
+    get_bucket_tmpdir_full_path() {
+        return native_fs_utils.get_bucket_tmpdir_full_path(this.bucket_path, this.bucket_id);
     }
 
     get_write_resource() {
@@ -706,7 +716,7 @@ class NamespaceFS {
                 // dbg.log0('process_entry', dir_key, ent.name);
                 if ((!ent.name.startsWith(prefix_ent) ||
                     ent.name < marker_curr ||
-                    ent.name === this.get_bucket_tmpdir() ||
+                    ent.name === this.get_bucket_tmpdir_name() ||
                     ent.name === config.NSFS_FOLDER_OBJECT_NAME) &&
                     !this._is_hidden_version_path(ent.name)) {
                     return;
@@ -1163,7 +1173,8 @@ class NamespaceFS {
             // upload path is needed only when open_mode is w / for copy
             if (open_mode === 'w' || params.copy_source) {
                 const upload_id = uuidv4();
-                upload_path = path.join(this.bucket_path, this.get_bucket_tmpdir(), 'uploads', upload_id);
+                const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
+                upload_path = path.join(bucket_tmp_dir_path, 'uploads', upload_id);
                 await native_fs_utils._make_path_dirs(upload_path, fs_context);
             }
             let open_path = upload_path || file_path;
@@ -1408,7 +1419,7 @@ class NamespaceFS {
             await this._open_files_gpfs(fs_context, new_ver_tmp_path, latest_ver_path, upload_file,
                 latest_ver_info, open_mode, undefined, versioned_info) :
             undefined;
-        const bucket_tmp_dir_path = path.join(this.bucket_path, this.get_bucket_tmpdir());
+        const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
         dbg.log1('Namespace_fs._move_to_dest_version:', latest_ver_info, new_ver_info, gpfs_options);
 
         if (this._is_versioning_suspended()) {
@@ -2385,8 +2396,7 @@ class NamespaceFS {
 
     _mpu_root_path() {
         return path.join(
-            this.bucket_path,
-            this.get_bucket_tmpdir(),
+            this.get_bucket_tmpdir_full_path(),
             'multipart-uploads');
     }
 
@@ -2715,7 +2725,7 @@ class NamespaceFS {
                 gpfs_options = is_gpfs ?
                     await this._open_files_gpfs(fs_context, file_path, undefined, undefined, undefined, undefined, true) :
                     undefined;
-                const bucket_tmp_dir_path = path.join(this.bucket_path, this.get_bucket_tmpdir());
+                const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
                 await native_fs_utils.safe_unlink(fs_context, file_path, version_info, gpfs_options, bucket_tmp_dir_path);
                 return { ...version_info, latest: true };
             } else {
@@ -2827,7 +2837,7 @@ class NamespaceFS {
                 deleted_version_info.mtimeNsBigint < max_past_ver_info.mtimeNsBigint) return;
             dbg.log1('Namespace_fs._promote_version_to_latest ', max_past_ver_info.path, latest_ver_path, max_past_ver_info, latest_version_info);
             // on concurrent put, safe_move_gpfs might override new coming latest (no fd verification, gpfs linkfileat will override)
-            const bucket_tmp_dir_path = path.join(this.bucket_path, this.get_bucket_tmpdir());
+            const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
             await native_fs_utils.safe_move_posix(fs_context, max_past_ver_info.path, latest_ver_path, max_past_ver_info,
                 bucket_tmp_dir_path);
             break;
@@ -2881,7 +2891,7 @@ class NamespaceFS {
             if (latest_ver_info) {
                 const suspended_and_latest_is_not_null = this._is_versioning_suspended() &&
                     latest_ver_info.version_id_str !== NULL_VERSION_ID;
-                const bucket_tmp_dir_path = path.join(this.bucket_path, this.get_bucket_tmpdir());
+                const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
                 if (this._is_versioning_enabled() || suspended_and_latest_is_not_null) {
                     await native_fs_utils._make_path_dirs(versioned_path, fs_context);
                     await native_fs_utils.safe_move(fs_context, latest_ver_path, versioned_path, latest_ver_info,
@@ -2930,7 +2940,7 @@ class NamespaceFS {
                 const gpfs_options = is_gpfs ?
                     await this._open_files_gpfs(fs_context, null_versioned_path, undefined, undefined, undefined, undefined, true) :
                     undefined;
-                const bucket_tmp_dir_path = path.join(this.bucket_path, this.get_bucket_tmpdir());
+                const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
                 await native_fs_utils.safe_unlink(fs_context, null_versioned_path, null_versioned_path_info,
                     gpfs_options, bucket_tmp_dir_path);
 
diff --git a/src/sdk/nb.d.ts b/src/sdk/nb.d.ts
index 981d477d2f..20d23de23c 100644
--- a/src/sdk/nb.d.ts
+++ b/src/sdk/nb.d.ts
@@ -904,7 +904,7 @@ interface NativeFS {
     realpath(fs_context: NativeFSContext, path: string): Promise<string>;
     checkAccess(fs_context: NativeFSContext, path: string): Promise<void>;
     getsinglexattr(fs_context: NativeFSContext, path: string, key: string): Promise<string>;
-    getpwname(fs_context: NativeFSContext, user: string): Promise<Object>;
+    getpwname(fs_context: NativeFSContext, user: string): Promise<NativeFSUserObject>;
 
     readFile(
         fs_context: NativeFSContext,
@@ -993,6 +993,12 @@ type NativeFSStats = fs.Stats & {
     xattr?: NativeFSXattr;
 };
 
+type NativeFSUserObject = {
+    uid: number;
+    gid: number;
+    name: string;
+};
+
 interface HasherSync {
     update(buffer: Buffer): this;
     digest(): Buffer;
diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js
index f94e392216..221e14369f 100644
--- a/src/sdk/object_sdk.js
+++ b/src/sdk/object_sdk.js
@@ -204,6 +204,10 @@ class ObjectSDK {
         return bucket.bucket_info.data;
     }
 
+    async read_bucket_full_info(name) {
+        return bucket_namespace_cache.get_with_cache({ sdk: this, name });
+    }
+
     async load_requesting_account(req) {
         try {
             const token = this.get_auth_token();
diff --git a/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js b/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js
index 30af0c175b..f11e34ea68 100644
--- a/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js
+++ b/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js
@@ -9,12 +9,11 @@
 const fs = require('fs');
 const path = require('path');
 const os_util = require('../../../util/os_utils');
 const fs_utils = require('../../../util/fs_utils');
-const config_module = require('../../../../config');
 const nb_native = require('../../../util/nb_native');
 const { set_path_permissions_and_owner, TMP_PATH, generate_s3_policy,
     set_nc_config_dir_in_config } = require('../../system_tests/test_utils');
 const { ACTIONS, TYPES, CONFIG_SUBDIRS } = require('../../../manage_nsfs/manage_nsfs_constants');
-const { get_process_fs_context, is_path_exists } = require('../../../util/native_fs_utils');
+const { get_process_fs_context, is_path_exists, get_bucket_tmpdir_full_path } = require('../../../util/native_fs_utils');
 const ManageCLIError = require('../../../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
 const { ManageCLIResponse } = require('../../../manage_nsfs/manage_nsfs_cli_responses');
@@ -182,8 +181,7 @@ describe('manage nsfs cli bucket flow', () => {
             const bucket_resp = JSON.parse(resp);
             expect(bucket_resp.response.reply._id).not.toBeNull();
             //create temp dir
-            bucket_temp_dir_path = path.join(bucket_storage_path,
-                config_module.NSFS_TEMP_DIR_NAME + "_" + bucket_resp.response.reply._id);
+            bucket_temp_dir_path = get_bucket_tmpdir_full_path(bucket_storage_path, bucket_resp.response.reply._id);
             await fs_utils.create_fresh_path(bucket_temp_dir_path);
             await fs_utils.file_must_exist(bucket_temp_dir_path);
         });
diff --git a/src/test/unit_tests/test_bucketspace_fs.js b/src/test/unit_tests/test_bucketspace_fs.js
index a11ac2b42b..aa0e3a33ee 100644
--- a/src/test/unit_tests/test_bucketspace_fs.js
+++ b/src/test/unit_tests/test_bucketspace_fs.js
@@ -10,7 +10,7 @@ const assert = require('assert');
 const P = require('../../util/promise');
 const config = require('../../../config');
 const fs_utils = require('../../util/fs_utils');
-const { get_process_fs_context, read_file, get_user_by_distinguished_name} = require('../../util/native_fs_utils');
+const { get_process_fs_context, read_file, get_user_by_distinguished_name, get_bucket_tmpdir_name } = require('../../util/native_fs_utils');
 const nb_native = require('../../util/nb_native');
 const SensitiveString = require('../../util/sensitive_string');
 const NamespaceFS = require('../../sdk/namespace_fs');
@@ -145,7 +145,15 @@ function make_dummy_object_sdk() {
         },
         _get_bucket_namespace(name) {
             const buck_path = path.join(new_buckets_path, name);
-            const dummy_nsfs = new NamespaceFS({ bucket_path: buck_path, bucket_id: '1', namespace_resource_id: undefined });
+            const dummy_nsfs = new NamespaceFS({
+                bucket_path: buck_path,
+                bucket_id: '1',
+                namespace_resource_id: undefined,
+                access_mode: undefined,
+                versioning: undefined,
+                force_md5_etag: undefined,
+                stats: undefined
+            });
             return dummy_nsfs;
         },
         is_nsfs_bucket(ns) {
@@ -158,6 +166,27 @@ function make_dummy_object_sdk() {
         async read_bucket_sdk_policy_info(name) {
             const bucket_info = await bucketspace_fs.read_bucket_sdk_info({ name });
             return { s3_policy: bucket_info.s3_policy };
+        },
+        async read_bucket_full_info(name) {
+            const buck_path = path.join(new_buckets_path, name);
+            const bucket = (await bucketspace_fs.read_bucket_sdk_info({ name }));
+            if (name === test_bucket_temp_dir) {
+                bucket.namespace.should_create_underlying_storage = false;
+            } else {
+                bucket.namespace.should_create_underlying_storage = true;
+            }
+            return {
+                ns: new NamespaceFS({
+                    bucket_path: buck_path,
+                    bucket_id: '1',
+                    namespace_resource_id: undefined,
+                    access_mode: undefined,
+                    versioning: undefined,
+                    force_md5_etag: undefined,
+                    stats: undefined
+                }),
+                bucket
+            };
         }
     };
 }
@@ -356,29 +385,32 @@ mocha.describe('bucketspace_fs', function() {
     });
 
     mocha.it('delete_bucket for should_create_underlying_storage false', async function() {
-        const param = { name: test_bucket_temp_dir};
+        const param = { name: test_bucket_temp_dir };
         await create_bucket(param.name);
         await fs.promises.stat(path.join(new_buckets_path, param.name));
         const bucket_config_path = get_config_file_path(CONFIG_SUBDIRS.BUCKETS, param.name);
         const data = await fs.promises.readFile(bucket_config_path);
         const bucket = await JSON.parse(data.toString());
-        const bucket_temp_dir_path = path.join(new_buckets_path, param.name, config.NSFS_TEMP_DIR_NAME + "_" + bucket._id);
+        const bucket_temp_dir_path = path.join(new_buckets_path, param.name, get_bucket_tmpdir_name(bucket._id));
         await nb_native().fs.mkdir(ACCOUNT_FS_CONFIG, bucket_temp_dir_path);
         await fs.promises.stat(bucket_temp_dir_path);
         await bucketspace_fs.delete_bucket(param, dummy_object_sdk);
         try {
             await fs.promises.stat(bucket_temp_dir_path);
+            assert.fail('stat should have failed with ENOENT');
         } catch (err) {
             assert.strictEqual(err.code, 'ENOENT');
             assert.match(err.message, /.noobaa-nsfs_/);
         }
         try {
             await fs.promises.stat(bucket_config_path);
+            assert.fail('stat should have failed with ENOENT');
         } catch (err) {
             assert.strictEqual(err.code, 'ENOENT');
             const path_for_err_msg = path.join(TMP_PATH, 'test_bucketspace_fs/config_root/buckets/buckettempdir.json');
             assert.equal(err.message, `ENOENT: no such file or directory, stat '${path_for_err_msg}'`);
         }
         await fs.promises.stat(path.join(new_buckets_path, param.name));
     });
 });
diff --git a/src/test/unit_tests/test_nsfs_versioning.js b/src/test/unit_tests/test_nsfs_versioning.js
index b28dd4d669..81ce66cff4 100644
--- a/src/test/unit_tests/test_nsfs_versioning.js
+++ b/src/test/unit_tests/test_nsfs_versioning.js
@@ -88,7 +88,7 @@ mocha.describe('namespace_fs - versioning', function() {
         const to_path = path.join(ns_tmp_bucket_path, file_key + '_mtime-1-ino-2');
         const fake_mtime_ino = { mtimeNsBigint: BigInt(0), ino: 0 };
         try {
-            const bucket_tmp_dir_path = path.join(ns_tmp.bucket_path, ns_tmp.get_bucket_tmpdir());
+            const bucket_tmp_dir_path = ns_tmp.get_bucket_tmpdir_full_path();
             await native_fs_utils.safe_move_posix(
                 dummy_object_sdk.requesting_account.nsfs_account_config,
                 from_path,
@@ -108,7 +108,7 @@ mocha.describe('namespace_fs - versioning', function() {
         const to_path = path.join(ns_tmp_bucket_path, file_key2 + '_mtime-1-ino-2');
         const fake_mtime_ino = { mtimeNsBigint: BigInt(0), ino: 0 };
         try {
-            const bucket_tmp_dir_path = path.join(ns_tmp.bucket_path, ns_tmp.get_bucket_tmpdir());
+            const bucket_tmp_dir_path = ns_tmp.get_bucket_tmpdir_full_path();
             await native_fs_utils.safe_move_posix(
                 dummy_object_sdk.requesting_account.nsfs_account_config,
                 from_path,
diff --git a/src/util/native_fs_utils.js b/src/util/native_fs_utils.js
index 575fd8f400..958ddb240e 100644
--- a/src/util/native_fs_utils.js
+++ b/src/util/native_fs_utils.js
@@ -562,6 +562,27 @@ async function read_file(fs_context, _path) {
     return data_parsed;
 }
 
+
+/**
+ * get_bucket_tmpdir_name returns the bucket tmp dir name
+ * @param {string} bucket_id
+ * @returns {string}
+ */
+function get_bucket_tmpdir_name(bucket_id) {
+    return config.NSFS_TEMP_DIR_NAME + '_' + bucket_id;
+}
+
+
+/**
+ * get_bucket_tmpdir_full_path returns the bucket tmp dir full path
+ * @param {string} bucket_path
+ * @param {string} bucket_id
+ * @returns {string}
+ */
+function get_bucket_tmpdir_full_path(bucket_path, bucket_id) {
+    return path.join(bucket_path, get_bucket_tmpdir_name(bucket_id));
+}
+
 exports.get_umasked_mode = get_umasked_mode;
 exports._make_path_dirs = _make_path_dirs;
 exports._create_path = _create_path;
@@ -595,3 +616,5 @@ exports.validate_bucket_creation = validate_bucket_creation;
 exports.is_path_exists = is_path_exists;
 exports.is_dir_rw_accessible = is_dir_rw_accessible;
 exports.folder_delete = folder_delete;
+exports.get_bucket_tmpdir_full_path = get_bucket_tmpdir_full_path;
+exports.get_bucket_tmpdir_name = get_bucket_tmpdir_name;
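
Usage sketch (illustration only, not part of the applied patch): the two new native_fs_utils helpers centralize the tmpdir naming that was previously built inline as config.NSFS_TEMP_DIR_NAME + '_' + bucket_id in manage_nsfs.js, bucketspace_fs.js, namespace_fs.js and the tests. The snippet below assumes a hypothetical bucket record { _id, path } as read from a bucket config file, and a require path relative to src/cmd; both are assumptions made for the example.

    // sketch only - shows how the helpers added in src/util/native_fs_utils.js compose
    const native_fs_utils = require('../util/native_fs_utils');

    function bucket_tmpdir_paths(bucket) {
        // tmpdir name, e.g. '.noobaa-nsfs_<bucket id>' (per the /.noobaa-nsfs_/ assertion in the test above)
        const tmpdir_name = native_fs_utils.get_bucket_tmpdir_name(bucket._id);
        // full path, equivalent to path.join(bucket.path, tmpdir_name)
        const tmpdir_full_path = native_fs_utils.get_bucket_tmpdir_full_path(bucket.path, bucket._id);
        return { tmpdir_name, tmpdir_full_path };
    }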
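
A second sketch, mirroring the NamespaceFS changes and the test updates above: callers that used to join bucket_path with get_bucket_tmpdir() can now call get_bucket_tmpdir_full_path() directly. The constructor arguments copy the shape used in test_bucketspace_fs.js; the bucket_path and bucket_id values are made up, and the require path assumes a file under src/test/unit_tests.

    // sketch only - hypothetical values, construction copied from the test diff above
    const NamespaceFS = require('../../sdk/namespace_fs');

    const ns = new NamespaceFS({
        bucket_path: '/tmp/test-bucket',   // hypothetical
        bucket_id: 'bucket-id-1',          // hypothetical
        namespace_resource_id: undefined,
        access_mode: undefined,
        versioning: undefined,
        force_md5_etag: undefined,
        stats: undefined
    });

    // before this patch: path.join(ns.bucket_path, ns.get_bucket_tmpdir())
    const bucket_tmp_dir_path = ns.get_bucket_tmpdir_full_path();
    // the name alone is still available via ns.get_bucket_tmpdir_name()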