Remove -Wshadow suppression which leaked into global namespace #48737

Merged (1 commit, Apr 13, 2023)
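For context: src/Dictionaries/DictionaryStructure.h carried a bare #pragma clang diagnostic ignored "-Wshadow" with no matching push/pop, so in every translation unit that included the header, all code after the include point was compiled with the shadowing warning silenced. This PR deletes the pragma and renames the identifiers that the restored diagnostic flags, usually by appending a trailing underscore or by choosing a distinct loop index. As a minimal, hypothetical reminder of what -Wshadow catches (illustration only, not code from this PR):

// shadow_demo.cpp - compile with: clang++ -Wshadow -c shadow_demo.cpp
#include <cstddef>

size_t pickLimit(size_t limit)
{
    if (limit == 0)
    {
        size_t limit = 128;  // -Wshadow warns here: this inner "limit" hides the function parameter
        return limit;
    }
    return limit;
}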
48 changes: 24 additions & 24 deletions programs/server/Server.cpp
@@ -1192,12 +1192,12 @@ try
{
Settings::checkNoSettingNamesAtTopLevel(*config, config_path);

-ServerSettings server_settings;
-server_settings.loadSettingsFromConfig(*config);
+ServerSettings server_settings_;
+server_settings_.loadSettingsFromConfig(*config);

-size_t max_server_memory_usage = server_settings.max_server_memory_usage;
+size_t max_server_memory_usage = server_settings_.max_server_memory_usage;

-double max_server_memory_usage_to_ram_ratio = server_settings.max_server_memory_usage_to_ram_ratio;
+double max_server_memory_usage_to_ram_ratio = server_settings_.max_server_memory_usage_to_ram_ratio;
size_t default_max_server_memory_usage = static_cast<size_t>(memory_amount * max_server_memory_usage_to_ram_ratio);

if (max_server_memory_usage == 0)
@@ -1225,7 +1225,7 @@ try
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);

-total_memory_tracker.setAllowUseJemallocMemory(server_settings.allow_use_jemalloc_memory);
+total_memory_tracker.setAllowUseJemallocMemory(server_settings_.allow_use_jemalloc_memory);

auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
@@ -1243,23 +1243,23 @@

global_context->setRemoteHostFilter(*config);

-global_context->setMaxTableSizeToDrop(server_settings.max_table_size_to_drop);
-global_context->setMaxPartitionSizeToDrop(server_settings.max_partition_size_to_drop);
+global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop);
+global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop);

ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited;
-if (server_settings.concurrent_threads_soft_limit_num > 0 && server_settings.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
-concurrent_threads_soft_limit = server_settings.concurrent_threads_soft_limit_num;
-if (server_settings.concurrent_threads_soft_limit_ratio_to_cores > 0)
+if (server_settings_.concurrent_threads_soft_limit_num > 0 && server_settings_.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
+concurrent_threads_soft_limit = server_settings_.concurrent_threads_soft_limit_num;
+if (server_settings_.concurrent_threads_soft_limit_ratio_to_cores > 0)
{
-auto value = server_settings.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
+auto value = server_settings_.concurrent_threads_soft_limit_ratio_to_cores * std::thread::hardware_concurrency();
if (value > 0 && value < concurrent_threads_soft_limit)
concurrent_threads_soft_limit = value;
}
ConcurrencyControl::instance().setMaxConcurrency(concurrent_threads_soft_limit);

-global_context->getProcessList().setMaxSize(server_settings.max_concurrent_queries);
-global_context->getProcessList().setMaxInsertQueriesAmount(server_settings.max_concurrent_insert_queries);
-global_context->getProcessList().setMaxSelectQueriesAmount(server_settings.max_concurrent_select_queries);
+global_context->getProcessList().setMaxSize(server_settings_.max_concurrent_queries);
+global_context->getProcessList().setMaxInsertQueriesAmount(server_settings_.max_concurrent_insert_queries);
+global_context->getProcessList().setMaxSelectQueriesAmount(server_settings_.max_concurrent_select_queries);

if (config->has("keeper_server"))
global_context->updateKeeperConfiguration(*config);
@@ -1270,34 +1270,34 @@ try
/// This is done for backward compatibility.
if (global_context->areBackgroundExecutorsInitialized())
{
-auto new_pool_size = server_settings.background_pool_size;
-auto new_ratio = server_settings.background_merges_mutations_concurrency_ratio;
+auto new_pool_size = server_settings_.background_pool_size;
+auto new_ratio = server_settings_.background_merges_mutations_concurrency_ratio;
global_context->getMergeMutateExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, static_cast<size_t>(new_pool_size * new_ratio));
-global_context->getMergeMutateExecutor()->updateSchedulingPolicy(server_settings.background_merges_mutations_scheduling_policy.toString());
+global_context->getMergeMutateExecutor()->updateSchedulingPolicy(server_settings_.background_merges_mutations_scheduling_policy.toString());
}

if (global_context->areBackgroundExecutorsInitialized())
{
-auto new_pool_size = server_settings.background_move_pool_size;
+auto new_pool_size = server_settings_.background_move_pool_size;
global_context->getMovesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}

if (global_context->areBackgroundExecutorsInitialized())
{
-auto new_pool_size = server_settings.background_fetches_pool_size;
+auto new_pool_size = server_settings_.background_fetches_pool_size;
global_context->getFetchesExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}

if (global_context->areBackgroundExecutorsInitialized())
{
-auto new_pool_size = server_settings.background_common_pool_size;
+auto new_pool_size = server_settings_.background_common_pool_size;
global_context->getCommonExecutor()->increaseThreadsAndMaxTasksCount(new_pool_size, new_pool_size);
}

-global_context->getBufferFlushSchedulePool().increaseThreadsCount(server_settings.background_buffer_flush_schedule_pool_size);
-global_context->getSchedulePool().increaseThreadsCount(server_settings.background_schedule_pool_size);
-global_context->getMessageBrokerSchedulePool().increaseThreadsCount(server_settings.background_message_broker_schedule_pool_size);
-global_context->getDistributedSchedulePool().increaseThreadsCount(server_settings.background_distributed_schedule_pool_size);
+global_context->getBufferFlushSchedulePool().increaseThreadsCount(server_settings_.background_buffer_flush_schedule_pool_size);
+global_context->getSchedulePool().increaseThreadsCount(server_settings_.background_schedule_pool_size);
+global_context->getMessageBrokerSchedulePool().increaseThreadsCount(server_settings_.background_message_broker_schedule_pool_size);
+global_context->getDistributedSchedulePool().increaseThreadsCount(server_settings_.background_distributed_schedule_pool_size);

if (config->has("resources"))
{
10 changes: 5 additions & 5 deletions src/Databases/TablesLoader.cpp
@@ -177,7 +177,7 @@ void TablesLoader::removeUnresolvableDependencies()
}


-void TablesLoader::loadTablesInTopologicalOrder(ThreadPool & pool)
+void TablesLoader::loadTablesInTopologicalOrder(ThreadPool & pool_)
{
/// Compatibility setting which should be enabled by default on attach
/// Otherwise server will be unable to start for some old-format of IPv6/IPv4 types of columns
@@ -189,20 +189,20 @@ void TablesLoader::loadTablesInTopologicalOrder(ThreadPool & pool)

for (size_t level = 0; level != tables_to_load.size(); ++level)
{
-startLoadingTables(pool, load_context, tables_to_load[level], level);
-pool.wait();
+startLoadingTables(pool_, load_context, tables_to_load[level], level);
+pool_.wait();
}
}

-void TablesLoader::startLoadingTables(ThreadPool & pool, ContextMutablePtr load_context, const std::vector<StorageID> & tables_to_load, size_t level)
+void TablesLoader::startLoadingTables(ThreadPool & pool_, ContextMutablePtr load_context, const std::vector<StorageID> & tables_to_load, size_t level)
{
size_t total_tables = metadata.parsed_tables.size();

LOG_INFO(log, "Loading {} tables with dependency level {}", tables_to_load.size(), level);

for (const auto & table_id : tables_to_load)
{
-pool.scheduleOrThrowOnError([this, load_context, total_tables, table_name = table_id.getQualifiedName()]()
+pool_.scheduleOrThrowOnError([this, load_context, total_tables, table_name = table_id.getQualifiedName()]()
{
const auto & path_and_query = metadata.parsed_tables[table_name];
databases[table_name.database]->loadTableFromMetadata(load_context, path_and_query.path, table_name, path_and_query.ast, strictness_mode);
3 changes: 0 additions & 3 deletions src/Dictionaries/DictionaryStructure.h
@@ -16,9 +16,6 @@
#include <Interpreters/IExternalLoadable.h>


-/// Clang mistakenly warns about the names in enum class.
-#pragma clang diagnostic ignored "-Wshadow"
Member (review comment on the removed pragma): omg, great finding!


namespace DB
{
using TypeIndexUnderlying = magic_enum::underlying_type_t<TypeIndex>;
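The pragma removed above was not scoped with push/pop, so its effect did not end with the header: it disabled -Wshadow for the rest of every translation unit that included DictionaryStructure.h. Had a suppression around the enum class names mentioned in the removed comment really been necessary, a scoped form would have kept it local. A sketch of that alternative (illustrative only; the PR instead deletes the pragma and fixes the shadowing at the affected sites):

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"
// ... only the declarations that trigger the (false positive) warning ...
#pragma clang diagnostic pop
// From here on, -Wshadow is in effect again for code that includes the header.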
4 changes: 2 additions & 2 deletions src/Dictionaries/HashedDictionary.cpp
@@ -139,13 +139,13 @@ class ParallelDictionaryLoader : public boost::noncopyable
void threadWorker(size_t shard)
{
Block block;
-DictionaryKeysArenaHolder<dictionary_key_type> arena_holder;
+DictionaryKeysArenaHolder<dictionary_key_type> arena_holder_;
auto & shard_queue = *shards_queues[shard];

while (shard_queue.pop(block))
{
Stopwatch watch;
-dictionary.blockToAttributes(block, arena_holder, shard);
+dictionary.blockToAttributes(block, arena_holder_, shard);
UInt64 elapsed_ms = watch.elapsedMilliseconds();
if (elapsed_ms > 1'000)
LOG_TRACE(dictionary.log, "Block processing for shard #{} is slow {}ms (rows {}).", shard, elapsed_ms, block.rows());
10 changes: 5 additions & 5 deletions src/Dictionaries/RangeHashedDictionary.h
@@ -1227,7 +1227,7 @@ Pipe RangeHashedDictionary<dictionary_key_type>::read(const Names & column_names
DictionarySourceCoordinator::ReadColumnsFunc read_keys_func = [dictionary_copy = dictionary](
const Strings & attribute_names,
const DataTypes & result_types,
-const Columns & key_columns,
+const Columns & key_columns_,
const DataTypes,
const Columns &)
{
@@ -1238,15 +1238,15 @@
Columns result;
result.reserve(attribute_names_size);

-const ColumnPtr & key_column = key_columns.back();
+const ColumnPtr & key_column = key_columns_.back();

-const auto * key_to_index_column = typeid_cast<const ColumnUInt64 *>(key_column.get());
-if (!key_to_index_column)
+const auto * key_to_index_column_ = typeid_cast<const ColumnUInt64 *>(key_column.get());
+if (!key_to_index_column_)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Dictionary {} read expect indexes column with type UInt64",
range_dictionary_ptr->getFullName());

-const auto & data = key_to_index_column->getData();
+const auto & data = key_to_index_column_->getData();

for (size_t i = 0; i < attribute_names_size; ++i)
{
52 changes: 26 additions & 26 deletions src/Dictionaries/RegExpTreeDictionary.cpp
@@ -198,22 +198,22 @@ void RegExpTreeDictionary::initRegexNodes(Block & block)
Array keys = (*keys_column)[i].safeGet<Array>();
Array values = (*values_column)[i].safeGet<Array>();
size_t keys_size = keys.size();
-for (size_t i = 0; i < keys_size; i++)
+for (size_t j = 0; j < keys_size; j++)
{
-const String & name = keys[i].safeGet<String>();
-const String & value = values[i].safeGet<String>();
-if (structure.hasAttribute(name))
+const String & name_ = keys[j].safeGet<String>();
+const String & value = values[j].safeGet<String>();
+if (structure.hasAttribute(name_))
{
-const auto & attr = structure.getAttribute(name);
+const auto & attr = structure.getAttribute(name_);
auto string_pieces = createStringPieces(value, num_captures, regex, logger);
if (!string_pieces.empty())
{
-node->attributes[name] = RegexTreeNode::AttributeValue{.field = values[i], .pieces = std::move(string_pieces)};
+node->attributes[name_] = RegexTreeNode::AttributeValue{.field = values[j], .pieces = std::move(string_pieces)};
}
else
{
-Field field = parseStringToField(values[i].safeGet<String>(), attr.type);
-node->attributes[name] = RegexTreeNode::AttributeValue{.field = std::move(field)};
+Field field = parseStringToField(values[j].safeGet<String>(), attr.type);
+node->attributes[name_] = RegexTreeNode::AttributeValue{.field = std::move(field)};
}
}
}
@@ -424,23 +424,23 @@ bool RegExpTreeDictionary::setAttributes(
return attributes_to_set.size() == attributes.size();
visited_nodes.emplace(id);
const auto & node_attributes = regex_nodes.at(id)->attributes;
-for (const auto & [name, value] : node_attributes)
+for (const auto & [name_, value] : node_attributes)
{
-if (!attributes.contains(name) || attributes_to_set.contains(name))
+if (!attributes.contains(name_) || attributes_to_set.contains(name_))
continue;
if (value.containsBackRefs())
{
auto [updated_str, use_default] = processBackRefs(data, regex_nodes.at(id)->searcher, value.pieces);
if (use_default)
{
-DefaultValueProvider default_value(attributes.at(name).null_value, defaults.at(name));
-attributes_to_set[name] = default_value.getDefaultValue(key_index);
+DefaultValueProvider default_value(attributes.at(name_).null_value, defaults.at(name_));
+attributes_to_set[name_] = default_value.getDefaultValue(key_index);
}
else
-attributes_to_set[name] = parseStringToField(updated_str, attributes.at(name).type);
+attributes_to_set[name_] = parseStringToField(updated_str, attributes.at(name_).type);
}
else
-attributes_to_set[name] = value.field;
+attributes_to_set[name_] = value.field;
}

auto parent_id = regex_nodes.at(id)->parent_id;
@@ -541,11 +541,11 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
std::unordered_map<String, MutableColumnPtr> columns;

/// initialize columns
-for (const auto & [name, attr] : attributes)
+for (const auto & [name_, attr] : attributes)
{
auto col_ptr = attr.type->createColumn();
col_ptr->reserve(keys_offsets.size());
-columns[name] = std::move(col_ptr);
+columns[name_] = std::move(col_ptr);
}

UInt64 offset = 0;
@@ -628,25 +628,25 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
break;
}

-for (const auto & [name, attr] : attributes)
+for (const auto & [name_, attr] : attributes)
{
-if (attributes_to_set.contains(name))
+if (attributes_to_set.contains(name_))
continue;

-DefaultValueProvider default_value(attr.null_value, defaults.at(name));
-columns[name]->insert(default_value.getDefaultValue(key_idx));
+DefaultValueProvider default_value(attr.null_value, defaults.at(name_));
+columns[name_]->insert(default_value.getDefaultValue(key_idx));
}

/// insert to columns
-for (const auto & [name, value] : attributes_to_set)
-columns[name]->insert(value);
+for (const auto & [name_, value] : attributes_to_set)
+columns[name_]->insert(value);

offset = key_offset;
}

std::unordered_map<String, ColumnPtr> result;
-for (auto & [name, mutable_ptr] : columns)
-result.emplace(name, std::move(mutable_ptr));
+for (auto & [name_, mutable_ptr] : columns)
+result.emplace(name_, std::move(mutable_ptr));

return result;
}
@@ -684,8 +684,8 @@ Columns RegExpTreeDictionary::getColumns(
defaults);

Columns result;
-for (const String & name : attribute_names)
-result.push_back(columns_map.at(name));
+for (const String & name_ : attribute_names)
+result.push_back(columns_map.at(name_));

return result;
}
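In initRegexNodes above, an outer loop variable i was already in scope (it indexes (*keys_column)[i] a few lines up), and the inner for reused the same name, shadowing it; the fix renames the inner index to j. A small, self-contained illustration of the same pattern (names are invented):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<std::vector<int>> rows = {{1, 2}, {3, 4}};
    for (size_t i = 0; i < rows.size(); ++i)
    {
        // Declaring another "i" here would shadow the outer index and trigger -Wshadow;
        // a distinct name keeps both indices visible.
        for (size_t j = 0; j < rows[i].size(); ++j)
            std::printf("row %zu, column %zu: %d\n", i, j, rows[i][j]);
    }
    return 0;
}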
12 changes: 6 additions & 6 deletions src/Dictionaries/YAMLRegExpTreeDictionarySource.cpp
@@ -229,23 +229,23 @@ void parseMatchNode(UInt64 parent_id, UInt64 & id, const YAML::Node & node, Resu
{
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Yaml match rule must contain key {}", key_name);
}
-for (const auto & [key, node] : match)
+for (const auto & [key, node_] : match)
{
if (key == key_name)
{
-if (!node.IsScalar())
+if (!node_.IsScalar())
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "`{}` should be a String", key_name);

-attributes_to_insert.reg_exp = node.as<String>();
+attributes_to_insert.reg_exp = node_.as<String>();
}
else if (structure.hasAttribute(key))
{
attributes_to_insert.keys.push_back(key);
-attributes_to_insert.values.push_back(node.as<String>());
+attributes_to_insert.values.push_back(node_.as<String>());
}
-else if (node.IsSequence())
+else if (node_.IsSequence())
{
-parseMatchList(attributes_to_insert.id, id, node, result, key_name, structure);
+parseMatchList(attributes_to_insert.id, id, node_, result, key_name, structure);
}
/// unknown attributes.
}
8 changes: 4 additions & 4 deletions src/Functions/FunctionsExternalDictionaries.h
@@ -1068,11 +1068,11 @@ class FunctionDictGetDescendantsExecutable final : public IExecutableFunction
FunctionDictGetDescendantsExecutable(
String name_,
size_t level_,
-DictionaryHierarchicalParentToChildIndexPtr hierarchical_parent_to_child_index,
+DictionaryHierarchicalParentToChildIndexPtr hierarchical_parent_to_child_index_,
std::shared_ptr<FunctionDictHelper> dictionary_helper_)
: name(std::move(name_))
, level(level_)
-, hierarchical_parent_to_child_index(std::move(hierarchical_parent_to_child_index))
+, hierarchical_parent_to_child_index(std::move(hierarchical_parent_to_child_index_))
, dictionary_helper(std::move(dictionary_helper_))
{}

@@ -1110,13 +1110,13 @@ class FunctionDictGetDescendantsBase final : public IFunctionBase
const DataTypes & argument_types_,
const DataTypePtr & result_type_,
size_t level_,
-DictionaryHierarchicalParentToChildIndexPtr hierarchical_parent_to_child_index,
+DictionaryHierarchicalParentToChildIndexPtr hierarchical_parent_to_child_index_,
std::shared_ptr<FunctionDictHelper> helper_)
: name(std::move(name_))
, argument_types(argument_types_)
, result_type(result_type_)
, level(level_)
-, hierarchical_parent_to_child_index(std::move(hierarchical_parent_to_child_index))
+, hierarchical_parent_to_child_index(std::move(hierarchical_parent_to_child_index_))
, helper(std::move(helper_))
{}

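The two constructors above follow the convention applied throughout this PR: a constructor parameter that initializes a same-named member gets a trailing underscore, so the member is never hidden inside the constructor. Whether the original spelling is flagged by plain -Wshadow or only by one of clang's stricter shadow-warning variants depends on the compiler and flags, but the renamed form is unambiguous either way. A self-contained sketch of the pattern (class and member names are invented for illustration):

#include <memory>
#include <utility>

struct ChildIndex {};
using ChildIndexPtr = std::shared_ptr<ChildIndex>;

class DescendantsLookup
{
public:
    explicit DescendantsLookup(ChildIndexPtr hierarchical_index_)  // was hierarchical_index, the same name as the member below
        : hierarchical_index(std::move(hierarchical_index_))
    {}

private:
    ChildIndexPtr hierarchical_index;
};

int main()
{
    DescendantsLookup lookup(std::make_shared<ChildIndex>());
    return 0;
}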