2 changes: 1 addition & 1 deletion src/agent/block_store_services/block_store_azure.js
@@ -194,7 +194,7 @@ class BlockStoreAzure extends BlockStoreBase {

const data_size = Number(info_arg.contentLength);
const noobaablockmd = info_arg.metadata.noobaablockmd || info_arg.metadata.noobaa_block_md;
- dbg.log0('block_store_azure._delete_blocks info: data_size: ', data_size, 'noobaablockmd: ', noobaablockmd);
+ dbg.log1('block_store_azure._delete_blocks info: data_size: ', data_size, 'noobaablockmd: ', noobaablockmd);

const md_size = (noobaablockmd && noobaablockmd.length) || 0;
deleted_storage.size -= (data_size + md_size);
2 changes: 1 addition & 1 deletion src/agent/block_store_services/block_store_base.js
@@ -301,7 +301,7 @@ class BlockStoreBase {

async delete_blocks(req) {
const block_ids = req.rpc_params.block_ids;
- dbg.log0('delete_blocks', block_ids, 'node', this.node_name);
+ dbg.log1('delete_blocks', block_ids, 'node', this.node_name);
this.block_cache.multi_invalidate_keys(block_ids);
return this.block_modify_lock.surround_keys(
block_ids.map(block_id => String(block_id)),
2 changes: 1 addition & 1 deletion src/agent/block_store_services/block_store_s3.js
@@ -309,7 +309,7 @@ class BlockStoreS3 extends BlockStoreBase {
.filter(it => it.Key === key && !it.IsLatest)
.map(it => ({ Key: it.Key, VersionId: it.VersionId }));
if (delete_list.length) {
- dbg.log0('BlockStoreS3._delete_past_versions: target_bucket',
+ dbg.log1('BlockStoreS3._delete_past_versions: target_bucket',
this.cloud_info.target_bucket, 'delete_list', delete_list);
await this.s3cloud.deleteObjects({
Bucket: this.cloud_info.target_bucket,
2 changes: 1 addition & 1 deletion src/sdk/object_sdk.js
@@ -144,7 +144,7 @@ class ObjectSDK {
const { bucket } = await bucket_namespace_cache.get_with_cache({ sdk: this, name });
return bucket.namespace ? bucket.namespace.caching : undefined;
} catch (error) {
- dbg.log0('read_bucket_sdk_caching_info error', error);
+ dbg.error('read_bucket_sdk_caching_info error', error);
}
}

12 changes: 8 additions & 4 deletions src/server/common_services/auth_server.js
@@ -55,8 +55,10 @@ function create_auth(req) {

// consider email not found the same as bad password to avoid phishing attacks.
target_account = system_store.get_account_by_email(email);
- dbg.log0('credentials account not found', email, system_name);
- if (!target_account) throw new RpcError('UNAUTHORIZED', 'credentials not found');
+ if (!target_account) {
+     dbg.log0('credentials account not found', email, system_name);
+     throw new RpcError('UNAUTHORIZED', 'credentials not found');
+ }

// when password is not provided it means we want to give authorization
// by the currently authorized to another specific account instead of
@@ -66,8 +68,10 @@ function create_auth(req) {
return P.resolve()
.then(() => bcrypt.compare(password.unwrap(), target_account.password.unwrap()))
.then(match => {
- dbg.log0('password mismatch', email, system_name);
- if (!match) throw new RpcError('UNAUTHORIZED', 'credentials not found');
+ if (!match) {
+     dbg.log0('password mismatch', email, system_name);
+     throw new RpcError('UNAUTHORIZED', 'credentials not found');
+ }
// authentication passed!
// so this account is the authenticated_account
authenticated_account = target_account;
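Note that the two auth_server hunks above change more than a log level: each log call moves inside its failure branch, so "credentials account not found" and "password mismatch" are emitted only when the condition actually holds, where previously they were logged unconditionally on every authentication attempt. A minimal sketch of the pattern (dbg, RpcError, and check_password here are illustrative stand-ins, not the exact imports):

```js
// Sketch only: log the failure message inside the guard, not before it.
async function authenticate(account, password) {
    const match = account && await check_password(account, password);
    if (!match) {
        // Runs only on real failures; the old code executed this log
        // on every call, including successful logins.
        dbg.log0('password mismatch for', account && account.email);
        throw new RpcError('UNAUTHORIZED', 'credentials not found');
    }
    return account;
}
```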
4 changes: 2 additions & 2 deletions src/server/node_services/node_allocator.js
@@ -169,11 +169,11 @@ async function refresh_pool_alloc(pool, force) {
group.last_refresh = Date.now();
group.promise = null;
group.latency_groups = res.latency_groups;
- dbg.log0('refresh_pool_alloc: updated pool', pool.name,
+ dbg.log1('refresh_pool_alloc: updated pool', pool.name,
'nodes', _.map(_.flatMap(group.latency_groups, 'nodes'), 'name'));
_.each(alloc_group_by_pool_set, (g, pool_set) => {
if (_.includes(pool_set, String(pool._id))) {
- dbg.log0('invalidate alloc_group_by_pool_set for', pool_set,
+ dbg.log1('invalidate alloc_group_by_pool_set for', pool_set,
'on change to pool', pool._id);
delete alloc_group_by_pool_set[pool_set];
}
42 changes: 21 additions & 21 deletions src/server/node_services/nodes_monitor.js
@@ -836,7 +836,7 @@ class NodesMonitor extends EventEmitter {
_run() {
if (!this._started) return;
return this._run_serial.surround(() => {
- dbg.log0('_run:', this._map_node_id.size, 'nodes in queue');
+ dbg.log1('_run:', this._map_node_id.size, 'nodes in queue');
let next = 0;
const queue = Array.from(this._map_node_id.values());
const concur = Math.min(queue.length, RUN_NODE_CONCUR);
@@ -863,7 +863,7 @@ class NodesMonitor extends EventEmitter {
if (item.node.deleted) return P.reject(new Error(`node ${item.node.name} is deleted`));
return item._run_node_serial.surround(() =>
P.resolve()
- .then(() => dbg.log0('_run_node:', item.node.name))
+ .then(() => dbg.log1('_run_node:', item.node.name))
.then(() => this._get_agent_info(item))
.then(() => { //If internal or cloud resource, cut down initializing time (in update_rpc_config)
if (!item.node_from_store && (item.node.is_mongo_node || item.node.is_cloud_node)) {
@@ -932,11 +932,11 @@ class NodesMonitor extends EventEmitter {
alert = `The target ${storage_container} does not exist for cloud resource ${pool.name}`;
}
break;
- case 'OFFLINE':
- alert = `Cloud resource ${pool.name} is offline`;
- break;
- default:
- break;
+ case 'OFFLINE':
+ alert = `Cloud resource ${pool.name} is offline`;
+ break;
+ default:
+ break;
}

if (alert) {
@@ -1034,7 +1034,7 @@ class NodesMonitor extends EventEmitter {
_get_agent_info(item) {
if (item.node.deleted) return;
if (!item.connection) return;
- dbg.log0('_get_agent_info:', item.node.name);
+ dbg.log1('_get_agent_info:', item.node.name);
let potential_masters = clustering_utils.get_potential_masters().map(addr => ({
address: url.format({
protocol: 'wss',
@@ -1147,7 +1147,7 @@ class NodesMonitor extends EventEmitter {
counter += 1;
}
this._map_node_name.set(String(updates.name), item);
- dbg.log0('_get_agent_info: set node name', item.node.name, 'to', updates.name);
+ dbg.log1('_get_agent_info: set node name', item.node.name, 'to', updates.name);
item.new_host = !this._map_host_id.get(updates.host_id);
if (!item.added_host) {
if (!item.new_host) {
@@ -1164,7 +1164,7 @@ class NodesMonitor extends EventEmitter {
let agent_config = system_store.data.get_by_id(item.node.agent_config) || {};
// on first call to get_agent_info enable\disable the node according to the configuration
let should_start_service = this._should_enable_agent(info, agent_config);
- dbg.log0(`first call to get_agent_info. storage agent ${item.node.name}. should_start_service=${should_start_service}. `);
+ dbg.log1(`first call to get_agent_info. storage agent ${item.node.name}. should_start_service=${should_start_service}. `);
if (!should_start_service) {
item.node.decommissioned = Date.now();
item.node.decommissioning = item.node.decommissioned;
@@ -1367,7 +1367,7 @@ class NodesMonitor extends EventEmitter {
if (item.last_store_perf_test && now < item.last_store_perf_test + STORE_PERF_TEST_INTERVAL) return;


- dbg.log0('running _test_store_perf::', item.node.name);
+ dbg.log1('running _test_store_perf::', item.node.name);
const res = await P.timeout(AGENT_RESPONSE_TIMEOUT,
this.client.agent.test_store_perf({
count: 5
@@ -1376,7 +1376,7 @@ class NodesMonitor extends EventEmitter {
})
);
item.last_store_perf_test = Date.now();
- dbg.log0('_test_store_perf returned:', res);
+ dbg.log0(`_test_store_perf for node ${item.node.name} returned:`, res);
this._set_need_update.add(item);
item.node.latency_of_disk_read = js_utils.array_push_keep_latest(
item.node.latency_of_disk_read, res.read, MAX_NUM_LATENCIES);
@@ -1437,7 +1437,7 @@ class NodesMonitor extends EventEmitter {

const start = Date.now();

- dbg.log0('_test_network_to_server::', item.node.name);
+ dbg.log1('_test_network_to_server::', item.node.name);
return P.timeout(AGENT_TEST_CONNECTION_TIMEOUT,
this.n2n_client.agent.test_network_perf({
source: this.n2n_agent.rpc_address,
@@ -1453,7 +1453,7 @@ class NodesMonitor extends EventEmitter {
this._set_need_update.add(item);
item.node.latency_to_server = js_utils.array_push_keep_latest(
item.node.latency_to_server, [took], MAX_NUM_LATENCIES);
- dbg.log0('_test_network_to_server:: Succeeded in sending n2n rpc to ',
+ dbg.log1('_test_network_to_server:: Succeeded in sending n2n rpc to ',
item.node.name, 'took', took);
req.connection.close();

@@ -1478,7 +1478,7 @@ class NodesMonitor extends EventEmitter {

const items_without_issues = this._get_detention_test_nodes(item, config.NODE_IO_DETENTION_TEST_NODES);
return P.map_one_by_one(items_without_issues, item_without_issues => {
- dbg.log0('_test_network_perf::', item.node.name, item.io_detention,
+ dbg.log1('_test_network_perf::', item.node.name, item.io_detention,
item.node.rpc_address, item_without_issues.node.rpc_address);
return P.timeout(AGENT_TEST_CONNECTION_TIMEOUT,
this.client.agent.test_network_perf_to_peer({
@@ -1494,7 +1494,7 @@ class NodesMonitor extends EventEmitter {
);
})
.then(() => {
- dbg.log0('_test_network_perf:: success in test', item.node.name);
+ dbg.log1('_test_network_perf:: success in test', item.node.name);
if (item.n2n_errors &&
Date.now() - item.n2n_errors > config.NODE_IO_DETENTION_THRESHOLD) {
item.n2n_errors = 0;
@@ -1511,7 +1511,7 @@ class NodesMonitor extends EventEmitter {
_test_nodes_validity(item) {
if (item.node.deleted) return;
if (!item.node_from_store) return;
- dbg.log0('_test_nodes_validity::', item.node.name);
+ dbg.log1('_test_nodes_validity::', item.node.name);
return P.resolve()
.then(() => Promise.all([
this._test_network_perf(item),
@@ -1521,7 +1521,7 @@ class NodesMonitor extends EventEmitter {
.then(() => {
if (item.io_reported_errors &&
Date.now() - item.io_reported_errors > config.NODE_IO_DETENTION_THRESHOLD) {
- dbg.log0('_test_nodes_validity:: io_reported_errors removed', item.node.name);
+ dbg.log1('_test_nodes_validity:: io_reported_errors removed', item.node.name);
item.io_reported_errors = 0;
}
});
@@ -1541,7 +1541,7 @@ class NodesMonitor extends EventEmitter {
sort: 'shuffle'
});
const selected = _.take(list, limit);
- dbg.log0('_get_detention_test_nodes::', item.node.name,
+ dbg.log1('_get_detention_test_nodes::', item.node.name,
_.map(selected, 'node.name'), limit);
return _.isUndefined(limit) ? list : selected;
}
@@ -2400,7 +2400,7 @@ class NodesMonitor extends EventEmitter {
);
if (!root_item) {
// if for some reason root node not found, take the first one.
- dbg.log0(`could not find node for root path, taking the first in the list. drives = ${host_nodes.map(item => item.node.drives[0])}`);
+ dbg.log1(`could not find node for root path, taking the first in the list. drives = ${host_nodes.map(item => item.node.drives[0])}`);
root_item = host_nodes[0];
}
const host_item = _.clone(root_item);
@@ -3324,7 +3324,7 @@ class NodesMonitor extends EventEmitter {

test_node_network(req) {
const { rpc_params } = req;
- dbg.log0('test_node_network:', rpc_params);
+ dbg.log1('test_node_network:', rpc_params);
this._throw_if_not_started_and_loaded();
const item = this._get_node({
rpc_address: rpc_params.source
6 changes: 3 additions & 3 deletions src/server/system_services/bucket_server.js
@@ -1302,7 +1302,7 @@ async function claim_bucket(req) {
try {
validate_bucket_creation(req);
} catch (err) {
- dbg.log0('claim_bucket failed validating bucket', err);
+ dbg.error('claim_bucket failed validating bucket', err);
throw err;
}
try {
@@ -1314,7 +1314,7 @@ async function claim_bucket(req) {
auth_token: req.auth_token
});
} catch (err) {
- dbg.log0('claim_bucket failed creating bucket', err);
+ dbg.error('claim_bucket failed creating bucket', err);
throw err;
}
}
@@ -1343,7 +1343,7 @@ async function claim_bucket(req) {
};
return ret;
} catch (err) {
- dbg.log0('claim_bucket failed creating account', err);
+ dbg.error('claim_bucket failed creating account', err);
if (req.rpc_params.create_bucket) {
await server_rpc.client.bucket.delete_bucket({
name: req.rpc_params.name
2 changes: 1 addition & 1 deletion src/server/system_services/system_server.js
@@ -88,7 +88,7 @@ async function _init() {
}

} catch (err) {
- dbg.log0('system_server _init', 'UNCAUGHT ERROR', err, err.stack);
+ dbg.error('system_server _init', 'UNCAUGHT ERROR', err, err.stack);
}
}

2 changes: 1 addition & 1 deletion src/upgrade/upgrade_manager.js
@@ -145,7 +145,7 @@ async function run_upgrade() {
await script.run({ dbg, db_client, system_store, system_server });
this_upgrade.completed_scripts.push(script.file);
} catch (err) {
- dbg.log0(`failed running upgrade script ${script.file}`, err);
+ dbg.error(`failed running upgrade script ${script.file}`, err);
this_upgrade.error = err.stack;
throw err;
}
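Taken together, the changes apply one logging convention across the codebase: chatty per-operation detail drops from dbg.log0 to dbg.log1, and caught failures move from dbg.log0 to dbg.error. A minimal sketch of that convention, assuming NooBaa's debug_module exposes log0/log1/error as used in the hunks above (the require path is illustrative):

```js
// Illustrative path; assumes the debug_module API used throughout this diff.
const dbg = require('../util/debug_module')(__filename);

async function delete_blocks(block_ids) {
    // Verbose, high-frequency detail: log1 keeps it out of default output.
    dbg.log1('delete_blocks', block_ids);
    try {
        // ... perform the deletion ...
    } catch (err) {
        // Failures are logged as errors so they surface at any verbosity.
        dbg.error('delete_blocks failed', block_ids, err);
        throw err;
    }
}
```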