Commit 52bd19f7 authored by Robin Holt, committed by Linus Torvalds

epoll: convert max_user_watches to long

On a 16TB machine, max_user_watches has an integer overflow.  Convert it
to use a long and handle the associated fallout.
Signed-off-by: Robin Holt <holt@sgi.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Davide Libenzi <davidel@xmailserver.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 65329bf4
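For scale, here is a back-of-the-envelope sketch (a standalone userspace program, not part of the patch) of the value eventpoll_init() computes on such a machine. EP_ITEM_COST is assumed here to be about 200 bytes, approximating sizeof(struct epitem) + sizeof(struct eppoll_entry) on 64-bit:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* 16 TiB of RAM; on 64-bit all of it counts as low memory. */
	unsigned long totalram_bytes = 16UL << 40;
	/* Assumed per-watch cost, standing in for EP_ITEM_COST. */
	unsigned long ep_item_cost = 200;
	/* Mirrors max_user_watches = (lowmem / 25) / EP_ITEM_COST, in bytes. */
	unsigned long watches = totalram_bytes / 25 / ep_item_cost;

	printf("computed limit: %lu, INT_MAX: %d\n", watches, INT_MAX);
	return 0;
}

The result is roughly 3.5 billion, which does not fit in a 32-bit int, hence the conversion to long.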
fs/eventpoll.c

@@ -217,7 +217,7 @@ struct ep_send_events_data {
  * Configuration options available inside /proc/sys/fs/epoll/
  */
 /* Maximum number of epoll watched descriptors, per user */
-static int max_user_watches __read_mostly;
+static long max_user_watches __read_mostly;
 
 /*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
@@ -240,16 +240,18 @@ static struct kmem_cache *pwq_cache __read_mostly;
 #include <linux/sysctl.h>
 
-static int zero;
+static long zero;
+static long long_max = LONG_MAX;
 
 ctl_table epoll_table[] = {
 	{
 		.procname = "max_user_watches",
 		.data = &max_user_watches,
-		.maxlen = sizeof(int),
+		.maxlen = sizeof(max_user_watches),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
+		.proc_handler = proc_doulongvec_minmax,
 		.extra1 = &zero,
+		.extra2 = &long_max,
 	},
 	{ }
 };
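proc_doulongvec_minmax() works on long-sized sysctl data and bounds, which is why zero is retyped to long and an explicit LONG_MAX upper bound is added via .extra2. As a quick sanity check, a minimal userspace reader (a hypothetical helper, not part of the patch) can consume the limit as a long through the /proc/sys/fs/epoll/ path mentioned above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");
	long watches;

	if (!f) {
		perror("max_user_watches");
		return 1;
	}
	if (fscanf(f, "%ld", &watches) != 1) {
		fprintf(stderr, "unexpected sysctl contents\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("fs.epoll.max_user_watches = %ld\n", watches);
	return 0;
}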
@@ -561,7 +563,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
-	atomic_dec(&ep->user->epoll_watches);
+	atomic_long_dec(&ep->user->epoll_watches);
 
 	return 0;
 }
@@ -898,11 +900,12 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
+	long user_watches;
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
-		     max_user_watches))
+	user_watches = atomic_long_read(&ep->user->epoll_watches);
+	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		return -ENOMEM;
@@ -966,7 +969,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	atomic_inc(&ep->user->epoll_watches);
+	atomic_long_inc(&ep->user->epoll_watches);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1426,6 +1429,7 @@ static int __init eventpoll_init(void)
 	 */
 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
+	BUG_ON(max_user_watches < 0);
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
include/linux/sched.h

@@ -683,7 +683,7 @@ struct user_struct {
 	atomic_t fanotify_listeners;
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+	atomic_long_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock */
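With epoll_watches widened to atomic_long_t, the atomic_long_read/inc/dec calls in eventpoll.c and the long-typed max_user_watches operate over the same range. A rough userspace analogue of that check-then-account pattern, sketched with C11 atomics rather than the kernel API:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static long max_user_watches = LONG_MAX;	/* the limit, now a long */
static atomic_long epoll_watches;		/* per-user watch counter */

/* ep_insert()-style check before accounting a new watch; returns -1
 * where the kernel would return -ENOSPC. */
static int watch_insert(void)
{
	long user_watches = atomic_load(&epoll_watches);

	if (user_watches >= max_user_watches)
		return -1;
	atomic_fetch_add(&epoll_watches, 1);
	return 0;
}

/* ep_remove()-style release of a watch. */
static void watch_remove(void)
{
	atomic_fetch_sub(&epoll_watches, 1);
}

int main(void)
{
	if (watch_insert() == 0)
		printf("watches in use: %ld\n", atomic_load(&epoll_watches));
	watch_remove();
	return 0;
}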