Commit b1d5ef5
Merge pull request #49995 from azat/dict/server-memory
Charge only server memory for dictionaries
serxa committed May 22, 2023
2 parents e7c0b75 + 0586a27 commit b1d5ef5
Showing 4 changed files with 43 additions and 0 deletions.
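Background for the diff below: dictionary loading (and destruction) can run on worker threads attached to the query that triggered it, so before this change the dictionary's memory could be charged against that query's or user's memory limits. The fix scopes a memory-tracker blocker around the work so the memory is charged only to the server-wide tracker. The following is a minimal, self-contained sketch of that RAII idea; all names here (ScopedTrackerBlocker, onAllocation, the counters) are illustrative stand-ins, not the ClickHouse API — the real class is MemoryTrackerBlockerInThread, added in the hunks below.

#include <atomic>
#include <cstddef>
#include <cstdio>

/// Server-wide counter: always charged ("charge only server memory").
static std::atomic<std::size_t> server_memory{0};
/// Stand-in for the query/user memory tracker of the current thread.
static thread_local std::size_t query_memory = 0;
/// When true, allocations on this thread are not attributed to the query/user tracker.
static thread_local bool tracking_blocked = false;

/// Illustrative RAII blocker, analogous in spirit to MemoryTrackerBlockerInThread.
struct ScopedTrackerBlocker
{
    bool previous;
    ScopedTrackerBlocker() : previous(tracking_blocked) { tracking_blocked = true; }
    ~ScopedTrackerBlocker() { tracking_blocked = previous; }
};

/// Hypothetical allocation hook: the server tracker is always charged,
/// the query/user tracker only while tracking is not blocked.
void onAllocation(std::size_t bytes)
{
    server_memory += bytes;
    if (!tracking_blocked)
        query_memory += bytes;
}

int main()
{
    onAllocation(1000);               /// charged to both server and query
    {
        ScopedTrackerBlocker blocker; /// as in the dictionary loading threads below
        onAllocation(5000);           /// charged to the server only
    }
    std::printf("server=%zu query=%zu\n", server_memory.load(), query_memory); /// server=6000 query=1000
}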
7 changes: 7 additions & 0 deletions src/Dictionaries/HashedDictionary.cpp
@@ -9,6 +9,7 @@
#include <Common/logger_useful.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/CurrentMetrics.h>
#include <Common/MemoryTrackerBlockerInThread.h>

#include <Core/Defines.h>

@@ -68,6 +69,9 @@ class ParallelDictionaryLoader : public boost::noncopyable
shards_queues[shard].emplace(backlog);
pool.scheduleOrThrowOnError([this, shard, thread_group = CurrentThread::getGroup()]
{
/// Do not account the memory occupied by dictionaries against the query/user context.
MemoryTrackerBlockerInThread memory_blocker;

if (thread_group)
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("HashedDictLoad");
@@ -226,6 +230,9 @@ HashedDictionary<dictionary_key_type, sparse, sharded>::~HashedDictionary()

pool.trySchedule([&container, thread_group = CurrentThread::getGroup()]
{
/// Do not account the memory occupied by dictionaries against the query/user context.
MemoryTrackerBlockerInThread memory_blocker;

if (thread_group)
CurrentThread::attachToGroupIfDetached(thread_group);
setThreadName("HashedDictDtor");
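Both hunks in this file follow the same pattern: the blocker is the first statement inside the lambda scheduled on the pool, so every allocation and deallocation the worker thread performs for the lifetime of the job ("HashedDictLoad" loading shards, "HashedDictDtor" destroying containers) bypasses query/user accounting. A hedged sketch of that shape, again with a hypothetical blocker and plain std::thread instead of ClickHouse's thread pool:

#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

/// Hypothetical stand-in for MemoryTrackerBlockerInThread (see the sketch above).
static thread_local bool tracking_blocked = false;
struct ScopedTrackerBlocker
{
    ScopedTrackerBlocker() { tracking_blocked = true; }
    ~ScopedTrackerBlocker() { tracking_blocked = false; }
};

void loadShard(std::size_t shard)
{
    /// Allocations made here while tracking_blocked is true would be charged
    /// to the server tracker only (compare onAllocation in the sketch above).
    std::printf("shard %zu loaded, tracking blocked: %d\n", shard, tracking_blocked);
}

int main()
{
    std::vector<std::thread> workers;
    for (std::size_t shard = 0; shard < 2; ++shard)
        workers.emplace_back([shard]
        {
            /// First statement in the worker, mirroring the diff above:
            /// stop charging the query/user tracker for everything this thread does.
            ScopedTrackerBlocker blocker;
            loadShard(shard);
        });
    for (auto & worker : workers)
        worker.join();
}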
4 changes: 4 additions & 0 deletions src/Interpreters/ExternalLoader.cpp
@@ -2,6 +2,7 @@

#include <mutex>
#include <pcg_random.hpp>
#include <Common/MemoryTrackerBlockerInThread.h>
#include <Common/Config/AbstractConfigurationComparison.h>
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
@@ -978,6 +979,9 @@ class ExternalLoader::LoadingDispatcher : private boost::noncopyable
if (thread_group)
CurrentThread::attachToGroup(thread_group);

/// Do not account the memory occupied by dictionaries against the query/user context.
MemoryTrackerBlockerInThread memory_blocker;

LOG_TRACE(log, "Start loading object '{}'", name);
try
{
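Here the blocker is constructed after the thread attaches to the query's thread group, whereas in HashedDictionary.cpp it is constructed before; since the blocking is per-thread and takes effect for allocations made after construction, the ordering relative to the attach call does not appear to matter — what matters is that the blocker is in place before the object actually starts loading (the LOG_TRACE and the load itself follow it).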
Empty file.
32 changes: 32 additions & 0 deletions tests/queries/0_stateless/02760_dictionaries_memory.sql.j2
@@ -0,0 +1,32 @@
-- Tags: long
-- Tag long: in parallel runs this test could exceed 60 seconds
{# vim: ft=sql #}

SET max_memory_usage=0;
DROP DICTIONARY IF EXISTS dict;
DROP TABLE IF EXISTS dict_data;

CREATE TABLE dict_data (key UInt64, value UInt64) Engine=Memory();
INSERT INTO dict_data SELECT number, number%10 FROM numbers(3_000_000);

SET max_memory_usage='4Mi';
{% for layout in [
'FLAT(INITIAL_ARRAY_SIZE 3_000_000 MAX_ARRAY_SIZE 3_000_000)',
'HASHED()',
'HASHED_ARRAY()',
'SPARSE_HASHED()',
'SPARSE_HASHED(SHARDS 2 /* shards are special, they use threads */)',
] %}
CREATE DICTIONARY dict (key UInt64, value UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE dict_data)) LIFETIME(0) LAYOUT({{layout}});
SYSTEM RELOAD DICTIONARY dict;
-- Assert that the dictionary takes more than 20MB in memory, so that even with
-- 2 shards each shard holds at least 10MB. With max_memory_usage='4Mi' this
-- guarantees the allocations are large enough for the memory tracker to raise
-- MEMORY_LIMIT_EXCEEDED if dictionary memory were charged to the query.
SELECT throwIf(bytes_allocated < 20e6, 'Memory constraints are not met for layout {{layout}}') FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict' FORMAT Null;
DROP DICTIONARY dict;

CREATE DICTIONARY dict (key UInt64, value UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE dict_data)) LIFETIME(0) LAYOUT({{layout}});
SELECT dictGet('dict', 'value', 1::UInt64) FORMAT Null;
DROP DICTIONARY dict;
{% endfor %}
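The test exercises each layout twice under max_memory_usage='4Mi': once loaded eagerly via SYSTEM RELOAD DICTIONARY and once lazily, triggered by the dictGet call. The throwIf above checks that the dictionary actually allocates at least 20 MB, far above the 4 MiB query limit, so before this change either path would presumably have failed with MEMORY_LIMIT_EXCEEDED; with dictionary memory charged only to the server, both succeed.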
