util/mr_cache: Add framework for memory registration cache
This is derived from work by Dmitry Gladkov, which was based on
the registration cache in the gni provider.

The interface for the cache consists of initialization and cleanup
routines, plus two calls: search and delete.  Search first looks for
a cached region that contains the requested region.  If no existing
region is found, the new region is added to the cache.  Delete marks
that the user is done with the region.  Every search call should be
paired with a delete call.

If caching is enabled, freeing a deleted region is deferred until the
region is both no longer being accessed and has the oldest access
time in the cache.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
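
A minimal usage sketch, showing how a provider might drive this
interface.  The my_* helpers, the cache size of 128, and the empty
callback bodies are illustrative assumptions, not part of this commit:

#include <fi_util.h>
#include <ofi_mr.h>

/* Hypothetical provider callbacks -- illustrative only.  add_region()
 * would register entry->iov with the hardware and may keep per-entry
 * state in entry->data; delete_region() undoes that registration. */
static int my_add_region(struct ofi_mr_cache *cache,
			 struct ofi_mr_entry *entry)
{
	return 0;	/* call the provider's registration routine here */
}

static void my_delete_region(struct ofi_mr_cache *cache,
			     struct ofi_mr_entry *entry)
{
	/* deregister the memory described by entry->iov here */
}

/* Done once, e.g. when the domain is opened. */
static int my_cache_open(struct util_domain *domain,
			 struct ofi_mr_cache *cache)
{
	cache->size = 128;		/* illustrative capacity */
	cache->entry_data_size = 0;	/* extra bytes reserved per entry */
	cache->add_region = my_add_region;
	cache->delete_region = my_delete_region;
	return ofi_mr_cache_init(domain, cache);
}

/* Per registration request: every search is paired with a delete. */
static int my_use_region(struct ofi_mr_cache *cache,
			 const struct fi_mr_attr *attr)
{
	struct ofi_mr_entry *entry;
	int ret;

	ret = ofi_mr_cache_search(cache, attr, &entry);
	if (ret)
		return ret;

	/* ... use the registration backing entry->iov / entry->data ... */

	ofi_mr_cache_delete(cache, entry);
	return 0;
}

ofi_mr_cache_init() takes a reference on the util_domain, which
ofi_mr_cache_cleanup() releases, so the cache should be cleaned up
before the domain is closed.
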
shefty committed Dec 12, 2017
1 parent cf76c71 commit 6f2e338
Showing 4 changed files with 306 additions and 1 deletion.
3 changes: 2 additions & 1 deletion Makefile.am
@@ -61,7 +61,8 @@ common_srcs = \
prov/util/src/util_mr_map.c \
prov/util/src/util_ns.c \
prov/util/src/util_shm.c \
prov/util/src/util_mem_monitor.c
prov/util/src/util_mem_monitor.c\
prov/util/src/util_mr_cache.c

if MACOS
common_srcs += src/unix/osd.c
25 changes: 25 additions & 0 deletions include/fi_iov.h
@@ -36,6 +36,7 @@

#include "config.h"

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
@@ -82,6 +83,30 @@ ofi_copy_from_iov(void *buf, uint64_t bufsize,
OFI_COPY_IOV_TO_BUF);
}

static inline void *
ofi_iov_end(const struct iovec *iov)
{
	return ((char *) iov->iov_base) + iov->iov_len;
}

static inline bool
ofi_iov_left(const struct iovec *iov1, const struct iovec *iov2)
{
	return ofi_iov_end(iov1) < iov2->iov_base;
}

static inline bool
ofi_iov_right(const struct iovec *iov1, const struct iovec *iov2)
{
	return iov1->iov_base > ofi_iov_end(iov2);
}

static inline bool
ofi_iov_within(const struct iovec *iov1, const struct iovec *iov2)
{
	return (iov1->iov_base >= iov2->iov_base) &&
	       (ofi_iov_end(iov1) <= ofi_iov_end(iov2));
}

#endif /* IOV_H */

48 changes: 48 additions & 0 deletions include/ofi_mr.h
@@ -38,11 +38,14 @@
#endif /* HAVE_CONFIG_H */

#include <inttypes.h>
#include <stdbool.h>

#include <fi.h>
#include <fi_atom.h>
#include <fi_lock.h>
#include <fi_list.h>
#include <rbtree.h>


#define OFI_MR_BASIC_MAP (FI_MR_ALLOCATED | FI_MR_PROV_KEY | FI_MR_VIRT_ADDR)

@@ -74,6 +77,7 @@ static inline uint64_t ofi_mr_get_prov_mode(uint32_t version,
}
}


/*
* Memory notifier - Report memory mapping changes to address ranges
*/
@@ -116,6 +120,7 @@ void ofi_monitor_unsubscribe(void *addr, size_t len,
struct ofi_subscription *subscription);
struct ofi_subscription *ofi_monitor_get_event(struct ofi_notification_queue *nq);


/*
* MR map
*/
@@ -141,4 +146,47 @@ int ofi_mr_map_verify(struct ofi_mr_map *map, uintptr_t *io_addr,
size_t len, uint64_t key, uint64_t access,
void **context);


/*
* Memory registration cache
*/

struct ofi_mr_entry {
	struct iovec iov;
	uint64_t access; /* TODO */
	unsigned int retired:1;
	int use_cnt;
	struct dlist_entry lru_entry;
	uint8_t data[];
};

struct ofi_mr_cache {
	struct util_domain *domain;
	size_t size;
	size_t entry_data_size;

	RbtHandle mr_tree;
	struct dlist_entry lru_list;

	uint64_t cached_cnt;
	uint64_t search_cnt;
	uint64_t delete_cnt;
	uint64_t hit_cnt;

	int (*add_region)(struct ofi_mr_cache *cache,
			  struct ofi_mr_entry *entry);
	void (*delete_region)(struct ofi_mr_cache *cache,
			      struct ofi_mr_entry *entry);
};

int ofi_mr_cache_init(struct util_domain *domain, struct ofi_mr_cache *cache);
void ofi_mr_cache_cleanup(struct ofi_mr_cache *cache);

/* Caller must provide locking around calls */
bool ofi_mr_cache_flush(struct ofi_mr_cache *cache);
int ofi_mr_cache_search(struct ofi_mr_cache *cache, const struct fi_mr_attr *attr,
			struct ofi_mr_entry **entry);
void ofi_mr_cache_delete(struct ofi_mr_cache *cache, struct ofi_mr_entry *entry);


#endif /* _OFI_MR_H_ */
231 changes: 231 additions & 0 deletions prov/util/src/util_mr_cache.c
@@ -0,0 +1,231 @@
/*
* Copyright (c) 2016-2017 Cray Inc. All rights reserved.
* Copyright (c) 2017 Intel Corporation, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/

#include <config.h>
#include <stdlib.h>
#include <fi_util.h>
#include <fi_iov.h>
#include <ofi_mr.h>
#include <fi_list.h>


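/* rbtree comparator: two iovecs compare equal whenever neither region
 * lies entirely to the left or right of the other, so rbtFind() on a
 * request returns any cached entry that overlaps it.
 */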
static int util_mr_find_overlap(void *a, void *b)
{
	struct iovec *iov1 = a, *iov2 = b;

	if (ofi_iov_left(iov1, iov2))
		return -1;
	else if (ofi_iov_right(iov1, iov2))
		return 1;
	else
		return 0;
}

static void util_mr_free_entry(struct ofi_mr_cache *cache,
			       struct ofi_mr_entry *entry)
{
	FI_DBG(cache->domain->prov, FI_LOG_MR,
	       "freeing %p\n", entry->iov.iov_base);
	cache->delete_region(cache, entry);
	free(entry);
	cache->cached_cnt--;
}

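/* Evict the least recently used idle entry, if any.  Returns true if an
 * entry was freed, false if the LRU list is empty.
 */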
bool ofi_mr_cache_flush(struct ofi_mr_cache *cache)
{
	struct ofi_mr_entry *entry;

	if (!dlist_empty(&cache->lru_list)) {
		dlist_pop_front(&cache->lru_list, struct ofi_mr_entry,
				entry, lru_entry);
		util_mr_free_entry(cache, entry);
		return true;
	} else {
		return false;
	}
}

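/* Drop a reference taken by ofi_mr_cache_search().  On the last reference,
 * a retired entry is freed immediately; otherwise the entry is moved to
 * the LRU list so it can be reused or evicted later.
 */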
void ofi_mr_cache_delete(struct ofi_mr_cache *cache, struct ofi_mr_entry *entry)
{
	FI_DBG(cache->domain->prov, FI_LOG_MR,
	       "delete %p\n", entry->iov.iov_base);
	cache->delete_cnt++;

	if (--entry->use_cnt == 0) {
		if (entry->retired) {
			util_mr_free_entry(cache, entry);
		} else {
			dlist_insert_tail(&entry->lru_entry, &cache->lru_list);
		}
	}
}

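/* Allocate a new entry and register it via the add_region() callback.  If
 * the cache is already at capacity, the entry is marked retired and is not
 * inserted into the tree, so it is freed as soon as its use count reaches
 * zero.
 */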
static int
util_mr_cache_create(struct ofi_mr_cache *cache, const struct iovec *iov,
		     uint64_t access, struct ofi_mr_entry **entry)
{
	int ret;

	FI_DBG(cache->domain->prov, FI_LOG_MR,
	       "creating %p\n", iov->iov_base);
	*entry = calloc(1, sizeof(**entry) + cache->entry_data_size);
	if (!*entry)
		return -FI_ENOMEM;

	(*entry)->iov = *iov;
	(*entry)->access = access;
	(*entry)->use_cnt = 1;

	ret = cache->add_region(cache, *entry);
	if (ret) {
		free(*entry);
		return ret;
	}

	if (++cache->cached_cnt > cache->size) {
		(*entry)->retired = 1;
	} else {
		if (rbtInsert(cache->mr_tree, &(*entry)->iov, *entry)) {
			util_mr_free_entry(cache, *entry);
			return -FI_ENOMEM;
		}
	}

	return 0;
}

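/* The request overlaps cached regions without being contained by any of
 * them.  Expand the request to cover every overlapping entry, remove those
 * entries from the tree (retiring any still in use, freeing idle ones),
 * then create a single entry spanning the merged range.
 */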
static int
util_mr_cache_merge(struct ofi_mr_cache *cache, const struct fi_mr_attr *attr,
		    RbtIterator iter, struct ofi_mr_entry **entry)
{
	struct iovec iov, *old_iov;
	struct ofi_mr_entry *old_entry;

	iov = *attr->mr_iov;
	do {
		rbtKeyValue(cache->mr_tree, iter, (void **) &old_iov,
			    (void **) &old_entry);

		iov.iov_base = MIN(iov.iov_base, old_iov->iov_base);
		iov.iov_len = ((uintptr_t)
			MAX(ofi_iov_end(&iov), ofi_iov_end(old_iov))) -
			((uintptr_t) iov.iov_base);

		rbtErase(cache->mr_tree, iter);
		if (old_entry->use_cnt) {
			old_entry->retired = 1;
		} else {
			dlist_remove(&old_entry->lru_entry);
			util_mr_free_entry(cache, old_entry);
		}

	} while ((iter = rbtFind(cache->mr_tree, &iov)));

	return util_mr_cache_create(cache, &iov, attr->access, entry);
}

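/* Find a cached registration covering attr->mr_iov.  While the cache is at
 * capacity, idle LRU entries are flushed first.  A miss creates a new
 * entry, a partial overlap triggers a merge, and a hit bumps the use count
 * and removes the entry from the LRU list if it was idle.
 */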
int ofi_mr_cache_search(struct ofi_mr_cache *cache, const struct fi_mr_attr *attr,
			struct ofi_mr_entry **entry)
{
	RbtIterator iter;
	struct iovec *iov;

	assert(attr->iov_count == 1);
	FI_DBG(cache->domain->prov, FI_LOG_MR,
	       "search %p\n", attr->mr_iov->iov_base);
	cache->search_cnt++;

	while ((cache->cached_cnt >= cache->size) && ofi_mr_cache_flush(cache))
		;

	iter = rbtFind(cache->mr_tree, (void *) attr->mr_iov);
	if (!iter) {
		return util_mr_cache_create(cache, attr->mr_iov,
					    attr->access, entry);
	}

	rbtKeyValue(cache->mr_tree, iter, (void **) &iov, (void **) entry);

	if (!ofi_iov_within(attr->mr_iov, iov))
		return util_mr_cache_merge(cache, attr, iter, entry);

	cache->hit_cnt++;
	if ((*entry)->use_cnt++ == 0)
		dlist_remove(&(*entry)->lru_entry);

	return 0;
}

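/* Logs cache statistics, frees all idle cached entries, and releases the
 * domain reference.  All outstanding entries must have been deleted
 * (use_cnt == 0) before this is called.
 */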
void ofi_mr_cache_cleanup(struct ofi_mr_cache *cache)
{
	struct ofi_mr_entry *entry;
	struct dlist_entry *tmp;
	RbtIterator iter;

	FI_INFO(cache->domain->prov, FI_LOG_MR, "MR cache stats: "
		"searches %" PRIu64 ", deletes %" PRIu64 ", hits %" PRIu64 "\n",
		cache->search_cnt, cache->delete_cnt, cache->hit_cnt);

	dlist_foreach_container_safe(&cache->lru_list, struct ofi_mr_entry,
				     entry, lru_entry, tmp) {
		assert(entry->use_cnt == 0);
		iter = rbtFind(cache->mr_tree, &entry->iov);
		assert(iter);
		rbtErase(cache->mr_tree, iter);
		dlist_remove(&entry->lru_entry);
		util_mr_free_entry(cache, entry);
	}
	rbtDelete(cache->mr_tree);
	ofi_atomic_dec32(&cache->domain->ref);
	assert(cache->cached_cnt == 0);
}

int ofi_mr_cache_init(struct util_domain *domain, struct ofi_mr_cache *cache)
{
	assert(cache->add_region && cache->delete_region);

	cache->mr_tree = rbtNew(util_mr_find_overlap);
	if (!cache->mr_tree)
		return -FI_ENOMEM;

	cache->domain = domain;
	ofi_atomic_inc32(&domain->ref);

	dlist_init(&cache->lru_list);
	cache->cached_cnt = 0;
	cache->search_cnt = 0;
	cache->delete_cnt = 0;
	cache->hit_cnt = 0;

	return 0;
}
