Skip to content

Commit

Permalink
mgr: cleanup
Browse files Browse the repository at this point in the history
Signed-off-by: Pere Diaz Bou <pdiazbou@redhat.com>
  • Loading branch information
pereman2 committed Sep 17, 2021
1 parent 2c0aec6 commit 8df35d9
Show file tree
Hide file tree
Showing 7 changed files with 37 additions and 27 deletions.
5 changes: 5 additions & 0 deletions qa/tasks/ceph_test_case.py
Expand Up @@ -86,6 +86,11 @@ def config_set(self, section, key, value):
self._mon_configs_set.add((section, key))
self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "set", section, key, str(value))

def cluster_cmd(self, command: str):
    """Run a ceph cluster command expressed as one space-separated string.

    The string is split on single spaces and the tokens are forwarded to
    the mon manager; the command's output is returned.
    """
    assert self.ceph_cluster is not None
    tokens = command.split(" ")
    return self.ceph_cluster.mon_manager.raw_cluster_cmd(*tokens)


def assert_cluster_log(self, expected_pattern, invert_match=False,
timeout=10, watch_channel=None):
"""
Expand Down
32 changes: 18 additions & 14 deletions qa/tasks/mgr/test_cache.py
Expand Up @@ -8,38 +8,39 @@ def setUp(self):
super(TestCache, self).setUp()
self.setup_mgrs()
self._load_module("cli_api")
self.enable_cache(10)
self.ttl = 10
self.enable_cache(self.ttl)

def tearDown(self):
    """Switch the mgr TTL cache back off so state does not leak between tests."""
    self.disable_cache()

def get_hit_miss_ratio(self):
    """Return (hits, misses) TTL-cache counters from the active mgr's perf dump."""
    active = self.mgr_cluster.get_active_id()
    dump = json.loads(self.cluster_cmd(f"daemon mgr.{active} perf dump"))
    counters = dump["mgr"]
    return int(counters["cache_hit"]), int(counters["cache_miss"])

def enable_cache(self, ttl):
    """Enable the mgr TTL cache with the given expiry in seconds."""
    self.cluster_cmd(f"config set mgr mgr_ttl_cache_expire_seconds {ttl}")

def disable_cache(self):
    """Disable the mgr TTL cache by setting its expiry to zero seconds."""
    self.cluster_cmd("config set mgr mgr_ttl_cache_expire_seconds 0")


def test_init_cache(self):
get_ttl = "config get mgr mgr_ttl_cache_expire_seconds"
res = self.mgr_cluster.mon_manager.raw_cluster_cmd(*(get_ttl.split(" ")))
res = self.cluster_cmd(get_ttl)
self.assertEquals(int(res), 10)

def test_health_not_cached(self):
get_health = "mgr api get health"

h_start, m_start = self.get_hit_miss_ratio()
self.mgr_cluster.mon_manager.raw_cluster_cmd(*(get_health.split(" ")))
self.cluster_cmd(get_health)
h, m = self.get_hit_miss_ratio()

self.assertEquals(h, h_start)
Expand All @@ -48,8 +49,10 @@ def test_health_not_cached(self):
def test_osdmap(self):
get_osdmap = "mgr api get osd_map"

self.mgr_cluster.mon_manager.raw_cluster_cmd(*(get_osdmap.split(" ")))
res = self.mgr_cluster.mon_manager.raw_cluster_cmd(*(get_osdmap.split(" ")))
# store in cache
self.cluster_cmd(get_osdmap)
# get from cache
res = self.cluster_cmd(get_osdmap)
osd_map = json.loads(res)
self.assertIn("osds", osd_map)
self.assertGreater(len(osd_map["osds"]), 0)
Expand All @@ -60,20 +63,21 @@ def test_osdmap(self):
def test_hit_miss_ratio(self):
    """Requesting the same mgr api endpoint twice should first miss the
    TTL cache (populating it) and then hit it, moving the perf counters
    by exactly one miss and then one hit."""
    get_osdmap = "mgr api get osd_map"

    import time
    # wait for clear
    # NOTE(review): 15 is presumably the 10s TTL plus margin so entries
    # cached by earlier tests have expired — confirm against self.ttl.
    time.sleep(15)

    # Baseline counters before touching the cache.
    hit_start, miss_start = self.get_hit_miss_ratio()

    def wait_miss():
        # Issue the request, then check whether exactly one extra miss
        # has been recorded; wait_until_true retries this predicate.
        self.cluster_cmd(get_osdmap)
        _, m = self.get_hit_miss_ratio()
        return m == miss_start + 1

    # Miss, add osd_map to cache
    self.wait_until_true(wait_miss, self.ttl + 5)
    h, m = self.get_hit_miss_ratio()
    self.assertEquals(h, hit_start)
    self.assertEquals(m, miss_start+1)

    # Hit, get osd_map from cache
    self.cluster_cmd(get_osdmap)
    h, m = self.get_hit_miss_ratio()
    self.assertEquals(h, hit_start+1)
    self.assertEquals(m, miss_start+1)
10 changes: 6 additions & 4 deletions src/mgr/ActivePyModules.cc
Expand Up @@ -175,7 +175,7 @@ void ActivePyModules::update_cache_metrics() {
perfcounter->set(l_mgr_cache_miss, hit_miss_ratio.second);
}

PyObject *ActivePyModules::get_python(const std::string &what)
PyObject *ActivePyModules::cacheable_get_python(const std::string &what)
{
uint64_t ttl_seconds = g_conf().get_val<uint64_t>("mgr_ttl_cache_expire_seconds");
if(ttl_seconds > 0) {
Expand All @@ -187,22 +187,24 @@ PyObject *ActivePyModules::get_python(const std::string &what)
} catch (std::out_of_range& e) {}
}

PyObject *obj = _get_python(what);
if(ttl_seconds && ttl_cache.is_allowed(what)) {
PyObject *obj = get_python(what);
if(ttl_seconds && ttl_cache.is_cacheable(what)) {
ttl_cache.insert(what, obj);
Py_INCREF(obj);
}
update_cache_metrics();
return obj;
}

PyObject *ActivePyModules::_get_python(const std::string &what)
PyObject *ActivePyModules::get_python(const std::string &what)
{
uint64_t ttl_seconds = g_conf().get_val<uint64_t>("mgr_ttl_cache_expire_seconds");

PyFormatter pf;
PyJSONFormatter jf;
// Use PyJSONFormatter if TTL cache is enabled.
Formatter &f = ttl_seconds ? (Formatter&)jf : (Formatter&)pf;

// Drop the GIL, as most of the following blocks will block on
// a mutex -- they are all responsible for re-taking the GIL before
// touching the PyFormatter instance or returning from the function.
Expand Down
2 changes: 1 addition & 1 deletion src/mgr/ActivePyModules.h
Expand Up @@ -82,8 +82,8 @@ class ActivePyModules
MonClient &get_monc() {return monc;}
Objecter &get_objecter() {return objecter;}
Client &get_client() {return client;}
PyObject *cacheable_get_python(const std::string &what);
PyObject *get_python(const std::string &what);
PyObject *_get_python(const std::string &what);
PyObject *get_server_python(const std::string &hostname);
PyObject *list_servers_python();
PyObject *get_metadata_python(
Expand Down
3 changes: 1 addition & 2 deletions src/mgr/BaseMgrModule.cc
Expand Up @@ -375,7 +375,7 @@ ceph_state_get(BaseMgrModule *self, PyObject *args)
return NULL;
}

return self->py_modules->get_python(what);
return self->py_modules->cacheable_get_python(what);
}


Expand Down Expand Up @@ -1594,4 +1594,3 @@ PyTypeObject BaseMgrModuleType = {
0, /* tp_alloc */
BaseMgrModule_new, /* tp_new */
};

6 changes: 3 additions & 3 deletions src/mgr/TTLCache.cc
Expand Up @@ -11,11 +11,11 @@ void TTLCacheBase<Key, Value>::insert(Key key, Value value) {
auto now = std::chrono::steady_clock::now();

if (!ttl) return;
uint16_t ttl_spread = ttl * this->ttl_spread_percent;
int16_t random_ttl_offset =
ttl * ttl_spread_ratio * (2l * rand() / float(RAND_MAX) - 1);
// in order not to have spikes of misses we increase or decrease by 25% of
// the ttl
uint16_t spreaded_ttl =
ttl + (rand() / float(RAND_MAX) - .5) * 2 * ttl_spread;
int16_t spreaded_ttl = ttl + random_ttl_offset;
auto expiration_date = now + std::chrono::seconds(spreaded_ttl);
cache::insert(key, {value, expiration_date});
}
Expand Down
6 changes: 3 additions & 3 deletions src/mgr/TTLCache.h
Expand Up @@ -57,7 +57,7 @@ template <class Key, class Value> class Cache {
std::pair<uint64_t, uint64_t> get_hit_miss_ratio() {
return std::make_pair(hits.load(), misses.load());
}
bool is_allowed(Key key) {
bool is_cacheable(Key key) {
for (auto k : allowed_keys) {
if (key == k) return true;
}
Expand All @@ -73,7 +73,7 @@ template <class Key, class Value>
class TTLCacheBase : public Cache<Key, std::pair<Value, ttl_time_point>> {
private:
uint16_t ttl;
float ttl_spread_percent;
float ttl_spread_ratio;
using value_type = std::pair<Value, ttl_time_point>;
using cache = Cache<Key, value_type>;

Expand All @@ -89,7 +89,7 @@ class TTLCacheBase : public Cache<Key, std::pair<Value, ttl_time_point>> {
public:
TTLCacheBase(uint16_t ttl_ = 0, uint16_t size = UINT16_MAX,
float spread = 0.25)
: Cache<Key, value_type>(size), ttl{ttl_}, ttl_spread_percent{spread} {}
: Cache<Key, value_type>(size), ttl{ttl_}, ttl_spread_ratio{spread} {}
~TTLCacheBase(){};
void insert(Key key, Value value);
Value get(Key key);
Expand Down

0 comments on commit 8df35d9

Please sign in to comment.