Commit 9731bc98 authored by Jens Axboe

io_uring: impose max limit on apoll cache

Caches like this tend to grow to the peak size, and then never get any
smaller. Impose a max limit on the size, to prevent it from growing too
big.

A somewhat randomly chosen 512 is the max size we'll allow the cache
to get. If a batch of frees comes in and would bring it over that, we
simply start kfree'ing the surplus.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9b797a37
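
As a rough illustration of the policy described in the commit message, here is a
minimal user-space C sketch. The names (obj, obj_cache, obj_cache_put,
OBJ_CACHE_MAX) are made up and a plain singly-linked list stands in for the
kernel's hlist; only the shape of the change matters: put() refuses an entry once
the cache already holds the maximum, and the caller frees the surplus object
instead of caching it.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define OBJ_CACHE_MAX	512	/* mirrors IO_ALLOC_CACHE_MAX (hypothetical name) */

struct obj {			/* stand-in for a cached object, e.g. an apoll entry */
	struct obj *next;
};

struct obj_cache {
	struct obj *head;
	unsigned int nr_cached;
};

/* Cache the object if there is room; tell the caller to free it otherwise. */
static bool obj_cache_put(struct obj_cache *cache, struct obj *o)
{
	if (cache->nr_cached < OBJ_CACHE_MAX) {
		o->next = cache->head;
		cache->head = o;
		cache->nr_cached++;
		return true;
	}
	return false;
}

int main(void)
{
	struct obj_cache cache = { .head = NULL, .nr_cached = 0 };
	unsigned int freed = 0;

	/* A batch of 600 "frees": only the first 512 are retained. */
	for (int i = 0; i < 600; i++) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		if (!obj_cache_put(&cache, o)) {
			free(o);	/* the analogue of kfree'ing the surplus */
			freed++;
		}
	}
	printf("cached %u, freed %u\n", cache.nr_cached, freed);
	return 0;
}

With the 512 cap, a burst of frees can park at most 512 objects in the cache;
everything beyond that goes straight back to the allocator.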
@@ -160,6 +160,7 @@ struct io_ev_fd {

 struct io_alloc_cache {
 	struct hlist_head	list;
+	unsigned int		nr_cached;
 };

 struct io_ring_ctx {
...
 #ifndef IOU_ALLOC_CACHE_H
 #define IOU_ALLOC_CACHE_H

+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX	512
+
 struct io_cache_entry {
 	struct hlist_node	node;
 };

-static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
+static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 				      struct io_cache_entry *entry)
 {
-	hlist_add_head(&entry->node, &cache->list);
+	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+		cache->nr_cached++;
+		hlist_add_head(&entry->node, &cache->list);
+		return true;
+	}
+	return false;
 }

 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
@@ -26,6 +36,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *
 static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 {
 	INIT_HLIST_HEAD(&cache->list);
+	cache->nr_cached = 0;
 }

 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
@@ -37,5 +48,6 @@ static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
 		hlist_del(node);
 		free(container_of(node, struct io_cache_entry, node));
 	}
+	cache->nr_cached = 0;
 }
 #endif
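
For completeness, a second user-space sketch (same hypothetical obj/obj_cache
names as above, repeated so it compiles on its own) models the consumer side and
the teardown: a get-or-allocate path loosely in the spirit of
io_alloc_cache_get(), and a drain that frees whatever is still parked and resets
the count, as io_alloc_cache_free() does above. It is an analogue under those
assumptions, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

struct obj { struct obj *next; };
struct obj_cache { struct obj *head; unsigned int nr_cached; };

/* Reuse a cached object when one is available, otherwise allocate fresh. */
static struct obj *obj_get(struct obj_cache *cache)
{
	struct obj *o = cache->head;

	if (o) {
		cache->head = o->next;
		cache->nr_cached--;
		return o;
	}
	return malloc(sizeof(*o));
}

/* Drain the cache and reset the count, like io_alloc_cache_free() with kfree. */
static void obj_cache_drain(struct obj_cache *cache)
{
	while (cache->head) {
		struct obj *o = cache->head;

		cache->head = o->next;
		free(o);
	}
	cache->nr_cached = 0;
}

int main(void)
{
	struct obj_cache cache = { .head = NULL, .nr_cached = 0 };

	/* Pre-populate the cache with one entry, then watch obj_get() reuse it. */
	struct obj *o = malloc(sizeof(*o));
	if (!o)
		return 1;
	o->next = cache.head;
	cache.head = o;
	cache.nr_cached++;

	struct obj *reused = obj_get(&cache);	/* served from the cache, no malloc */
	struct obj *fresh  = obj_get(&cache);	/* cache empty again: malloc fallback */

	free(reused);
	free(fresh);
	obj_cache_drain(&cache);
	printf("after drain: nr_cached=%u\n", cache.nr_cached);
	return 0;
}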
@@ -1181,7 +1181,8 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 			if (apoll->double_poll)
 				kfree(apoll->double_poll);
-			io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
+			if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
+				kfree(apoll);
 			req->flags &= ~REQ_F_POLLED;
 		}
 		if (req->flags & IO_REQ_LINK_FLAGS)
...