io_uring: Add KASAN support for alloc_caches
Add support for KASAN in the alloc_caches (apoll and netmsg_cache), so
that anything touching an object while it sits unused in a cache raises
a KASAN warning/exception.

The object is poisoned when it is put into the cache, and unpoisoned
when it is taken back out or when the cache is freed.

Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://lore.kernel.org/r/20230221135721.3230763-2-leitao@debian.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
leitao authored and axboe committed Feb 21, 2023
1 parent 7e09bef · commit b808f80
Showing 4 changed files with 20 additions and 7 deletions.
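
Before the per-file hunks, a minimal sketch (not part of this commit) of the class of bug the new poisoning surfaces. Kernel build context is assumed, and the helper io_poll_buggy_sketch() and its stale access are hypothetical; io_alloc_cache_put() and the async_poll layout are the real ones from the hunks below.

	#include <linux/kasan.h>
	#include <linux/slab.h>

	/* Hypothetical illustration: park an apoll object in the cache,
	 * then (incorrectly) keep using it. */
	static void io_poll_buggy_sketch(struct io_ring_ctx *ctx,
					 struct async_poll *apoll)
	{
		if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache)) {
			kfree(apoll);	/* cache full: object really freed */
			return;
		}
		/*
		 * Deliberate bug: apoll now sits in the cache, poisoned by
		 * kasan_slab_free_mempool(). With this patch KASAN reports
		 * the access below as a use-after-free; before it, the
		 * stale read went unnoticed.
		 */
		kfree(apoll->double_poll);
	}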
io_uring/alloc_cache.h: 8 additions, 3 deletions

@@ -16,19 +16,23 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
 		cache->nr_cached++;
 		wq_stack_add_head(&entry->node, &cache->list);
+		/* KASAN poisons object */
+		kasan_slab_free_mempool(entry);
 		return true;
 	}
 	return false;
 }
 
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache,
+							size_t size)
 {
 	struct io_wq_work_node *node;
 	struct io_cache_entry *entry;
 
 	if (cache->list.next) {
 		node = cache->list.next;
 		entry = container_of(node, struct io_cache_entry, node);
+		kasan_unpoison_range(entry, size);
 		cache->list.next = node->next;
 		return entry;
 	}
@@ -43,11 +47,12 @@ static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-				       void (*free)(struct io_cache_entry *))
+				       void (*free)(struct io_cache_entry *),
+				       size_t size)
 {
 	struct io_cache_entry *entry;
 
-	while ((entry = io_alloc_cache_get(cache))) {
+	while ((entry = io_alloc_cache_get(cache, size))) {
 		free(entry);
 	}
 
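A note on the new size parameter: kasan_unpoison_range() needs to know how many bytes to unpoison, and the cache itself only tracks the embedded io_cache_entry node, not the size of the containing object. That is why io_alloc_cache_get() and io_alloc_cache_free() now take a size argument, which each caller supplies as sizeof() of the cached type, as the io_uring.c, net.c, and poll.c hunks below show.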
io_uring/io_uring.c: 10 additions, 2 deletions

@@ -2772,6 +2772,15 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
+static __cold void io_uring_acache_free(struct io_ring_ctx *ctx)
+{
+
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free,
+			    sizeof(struct async_poll));
+	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free,
+			    sizeof(struct io_async_msghdr));
+}
+
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
@@ -2787,8 +2796,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	__io_sqe_files_unregister(ctx);
 	io_cqring_overflow_kill(ctx);
 	io_eventfd_unregister(ctx);
-	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
-	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+	io_uring_acache_free(ctx);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
io_uring/net.c: 1 addition, 1 deletion

@@ -140,7 +140,7 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->netmsg_cache);
+		entry = io_alloc_cache_get(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
 		if (entry) {
 			hdr = container_of(entry, struct io_async_msghdr, cache);
 			hdr->free_iov = NULL;
io_uring/poll.c: 1 addition, 1 deletion

@@ -661,7 +661,7 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		entry = io_alloc_cache_get(&ctx->apoll_cache);
+		entry = io_alloc_cache_get(&ctx->apoll_cache, sizeof(struct async_poll));
 		if (entry == NULL)
 			goto alloc_apoll;
 		apoll = container_of(entry, struct async_poll, cache);
