Skip to content

Commit

Permalink
epoll: drop max_user_instances and rely only on max_user_watches
Browse files Browse the repository at this point in the history
Linus suggested putting limits where the money is, and max_user_watches
already does that without the need for max_user_instances.  That has the
advantage of mitigating the potential DoS while allowing pretty generous
default behavior.

Allowing the top 4% of low memory (per user) to be allocated in epoll
watches, we have:

LOMEM    MAX_WATCHES (per user)
512MB    ~178000
1GB      ~356000
2GB      ~712000

A box with 512MB of lomem will meet some challenge in hitting 180K
watches, as socket-buffer math teaches us.  No more max_user_instances
limits, then.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Cc: Willy Tarreau <w@1wt.eu>
Cc: Michael Kerrisk <mtk.manpages@googlemail.com>
Cc: Bron Gondwana <brong@fastmail.fm>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
davidel authored and torvalds committed Jan 30, 2009
1 parent 3095eb8 commit 9df04e1
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 19 deletions.
22 changes: 4 additions & 18 deletions fs/eventpoll.c
Expand Up @@ -234,8 +234,6 @@ struct ep_pqueue {
/*
* Configuration options available inside /proc/sys/fs/epoll/
*/
/* Maximum number of epoll devices, per user */
static int max_user_instances __read_mostly;
/* Maximum number of epoll watched descriptors, per user */
static int max_user_watches __read_mostly;

Expand All @@ -260,14 +258,6 @@ static struct kmem_cache *pwq_cache __read_mostly;
static int zero;

ctl_table epoll_table[] = {
{
.procname = "max_user_instances",
.data = &max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_user_watches",
.data = &max_user_watches,
Expand Down Expand Up @@ -491,7 +481,6 @@ static void ep_free(struct eventpoll *ep)

mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
atomic_dec(&ep->user->epoll_devs);
free_uid(ep->user);
kfree(ep);
}
Expand Down Expand Up @@ -581,10 +570,6 @@ static int ep_alloc(struct eventpoll **pep)
struct eventpoll *ep;

user = get_current_user();
error = -EMFILE;
if (unlikely(atomic_read(&user->epoll_devs) >=
max_user_instances))
goto free_uid;
error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
Expand Down Expand Up @@ -1141,7 +1126,6 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
flags & O_CLOEXEC);
if (fd < 0)
ep_free(ep);
atomic_inc(&ep->user->epoll_devs);

error_return:
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
Expand Down Expand Up @@ -1366,8 +1350,10 @@ static int __init eventpoll_init(void)
struct sysinfo si;

si_meminfo(&si);
max_user_instances = 128;
max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
/*
* Allows top 4% of lomem to be allocated for epoll watches (per user).
*/
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;

/* Initialize the structure used to perform safe poll wait head wake ups */
Expand Down
1 change: 0 additions & 1 deletion include/linux/sched.h
Expand Up @@ -630,7 +630,6 @@ struct user_struct {
atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
atomic_t epoll_devs; /* The number of epoll descriptors currently open */
atomic_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
Expand Down

0 comments on commit 9df04e1

Please sign in to comment.