Commit c727b4c6 authored by Linus Torvalds

Merge branch 'akpm' (incoming fixes from Andrew)

Merge misc fixes from Andrew Morton:
 "The audit fixes have been floating around for a while - Al and Eric
  aren't responding to either myself or Kees so I asked Kees to
  re-review them and here they are."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (22 commits)
  lib/rbtree.c: avoid the use of non-static __always_inline
  MAINTAINERS: Omar had moved
  mm: compaction: partially revert capture of suitable high-order page
  linux/audit.h: move ptrace.h include to kernel header
  kernel/audit.c: avoid negative sleep durations
  audit: catch possible NULL audit buffers
  audit: create explicit AUDIT_SECCOMP event type
  MAINTAINERS: fix a status pattern
  MAINTAINERS: fix arch/arm/plat-omap/include/plat/omap_hwmod.h
  mm: thp: acquire the anon_vma rwsem for write during split
  mm: mmap: annotate vm_lock_anon_vma locking properly for lockdep
  lockdep, rwsem: provide down_write_nest_lock()
  arch/mn10300/Kconfig: select CONFIG_GENERIC_ATOMIC64
  mm: bootmem: fix free_all_bootmem_core() with odd bitmap alignment
  mm: use aligned zone start for pfn_to_bitidx calculation
  fs/exec.c: work around icc miscompilation
  mm: compaction: fix echo 1 > compact_memory return error issue
  mm: memblock: fix wrong memmove size in memblock_merge_regions()
  drivers/video/ssd1307fb.c: fix bit order bug in the byte translation function
  mm: migrate: check page_count of THP before migrating
  ...
parents 93ccb391 3cb7a563
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -648,7 +648,7 @@ F: arch/arm/
 ARM SUB-ARCHITECTURES
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: MAINTAINED
+S: Maintained
 F: arch/arm/mach-*/
 F: arch/arm/plat-*/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@@ -5507,8 +5507,7 @@ M: Benoît Cousson <b-cousson@ti.com>
 M: Paul Walmsley <paul@pwsan.com>
 L: linux-omap@vger.kernel.org
 S: Maintained
-F: arch/arm/mach-omap2/omap_hwmod.c
-F: arch/arm/plat-omap/include/plat/omap_hwmod.h
+F: arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 M: Benoît Cousson <b-cousson@ti.com>
@@ -7334,7 +7333,7 @@ S: Odd Fixes
 F: drivers/staging/speakup/
 
 STAGING - TI DSP BRIDGE DRIVERS
-M: Omar Ramirez Luna <omar.ramirez@ti.com>
+M: Omar Ramirez Luna <omar.ramirez@copitl.com>
 S: Odd Fixes
 F: drivers/staging/tidspbridge/

--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -6,6 +6,7 @@ config MN10300
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_KGDB
+	select GENERIC_ATOMIC64
 	select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
 	select GENERIC_CLOCKEVENTS
 	select MODULES_USE_ELF_RELA

--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -227,7 +227,7 @@ static const struct rtc_class_ops da9055_rtc_ops = {
 	.alarm_irq_enable = da9055_rtc_alarm_irq_enable,
 };
 
-static int __init da9055_rtc_device_init(struct da9055 *da9055,
+static int da9055_rtc_device_init(struct da9055 *da9055,
 					 struct da9055_pdata *pdata)
 {
 	int ret;

--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/ssd1307fb.c
@@ -145,8 +145,8 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par)
 				u32 page_length = SSD1307FB_WIDTH * i;
 				u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
 				u8 byte = *(vmem + index);
-				u8 bit = byte & (1 << (7 - (j % 8)));
-				bit = bit >> (7 - (j % 8));
+				u8 bit = byte & (1 << (j % 8));
+				bit = bit >> (j % 8);
 				buf |= bit << k;
 			}
 			ssd1307fb_write_data(par->client, buf);

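The fix flips the byte translation from MSB-first to LSB-first indexing: in the packed 1-bit-per-pixel framebuffer, the leftmost pixel of each byte lives in bit 0, so pixel j maps to bit j % 8, not 7 - (j % 8). A runnable userspace sketch of the corrected translation on a toy 8x8 framebuffer (WIDTH, get_pixel and page_byte are illustrative names, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    #define WIDTH 8 /* toy framebuffer: 8x8 pixels, 1 bit per pixel */

    /* Pixel (x, y) lives at bit (x % 8) of byte (y * WIDTH + x) / 8. */
    static int get_pixel(const uint8_t *vmem, unsigned x, unsigned y)
    {
        uint8_t byte = vmem[(y * WIDTH + x) / 8];
        return (byte >> (x % 8)) & 1; /* fixed: bit x % 8, not 7 - (x % 8) */
    }

    /* Build one SSD1307 page byte: column x, rows 8*page .. 8*page+7. */
    static uint8_t page_byte(const uint8_t *vmem, unsigned page, unsigned x)
    {
        uint8_t buf = 0;
        for (unsigned k = 0; k < 8; k++)
            buf |= get_pixel(vmem, x, 8 * page + k) << k;
        return buf;
    }

    int main(void)
    {
        uint8_t vmem[8] = { 0x01 }; /* one lit pixel at (0, 0) */
        printf("0x%02x\n", page_byte(vmem, 0, 0)); /* prints 0x01 */
        return 0;
    }
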
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -434,8 +434,9 @@ static int count(struct user_arg_ptr argv, int max)
 			if (IS_ERR(p))
 				return -EFAULT;
 
-			if (i++ >= max)
+			if (i >= max)
 				return -E2BIG;
+			++i;
 
 			if (fatal_signal_pending(current))
 				return -ERESTARTNOHAND;

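This works around an icc miscompilation of the old form. With max = MAX_ARG_STRINGS = INT_MAX, the test i++ >= max can only be true when i == INT_MAX, and on that very path the post-increment is signed overflow, which is undefined behaviour, so the compiler is entitled to treat the branch as unreachable; icc reportedly rewrites the test as ++i > max, which can never fire, silently deleting the -E2BIG check. Incrementing only after the test succeeds keeps the arithmetic defined for every compiler. A standalone sketch of the two shapes (illustrative, not the kernel function):

    /* Old shape: when i == max == INT_MAX the test is true, but the
     * post-increment on that very path overflows a signed int (undefined
     * behaviour), so an optimizer may assume the branch never runs. */
    int count_old(int i, int max)
    {
        if (i++ >= max)
            return -1;
        return i;
    }

    /* New shape: the increment happens only once i < max is established,
     * so it can never overflow. */
    int count_new(int i, int max)
    {
        if (i >= max)
            return -1;
        return i + 1;
    }
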
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -24,6 +24,7 @@
 #define _LINUX_AUDIT_H_
 
 #include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
 
 struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-	if (unlikely(!audit_dummy_context()))
+	/* Force a record to be reported if a signal was delivered. */
+	if (signr || unlikely(!audit_dummy_context()))
 		__audit_seccomp(syscall, signr, code);
 }

--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -13,9 +13,11 @@
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/kref.h>
 
 /**
  * struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
  * @used: Number of objects added
  * @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
  * based on affinity masks
  */
 struct cpu_rmap {
+	struct kref refcount;
 	u16 size, used;
 	void **obj;
 	struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
 #define CPU_RMAP_DIST_INF 0xffff
 
 extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-
-/**
- * free_cpu_rmap - free CPU affinity reverse-map
- * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
- */
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
-	kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
 
 extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
 extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -268,11 +268,6 @@ struct irq_affinity_notify {
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-static inline void irq_run_affinity_notifiers(void)
-{
-	flush_scheduled_work();
-}
-
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
 #  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
 #  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
 #  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i) lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
 # define rwsem_acquire_read(l, s, t, i) do { } while (0)
 # define rwsem_release(l, n, i) do { } while (0)
 #endif

--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
 
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 		   const struct rb_augment_callbacks *augment)
 {
 	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
 	struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
 	}
 
 	augment->propagate(tmp, NULL);
+	return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+		   const struct rb_augment_callbacks *augment)
+{
+	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
 	if (rebalance)
 		__rb_erase_color(rebalance, root, augment->rotate);
 }

--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock)			\
+do {								\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
+} while (0);
+
 #else
 # define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nest_lock(sem, nest_lock) down_read(sem)
 # define down_write_nested(sem, subclass) down_write(sem)
 #endif

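down_write_nest_lock() lets a caller tell lockdep that every rwsem of a given class is only ever write-locked while holding the same outer lock, so the nested acquisitions annotated in mm/mmap.c below are not reported as lock recursion. The typecheck() line is a pure compile-time guard: it rejects any nest_lock argument that lacks an embedded dep_map, and generates no code. A runnable sketch of the kernel's typecheck() trick (the macro body follows include/linux/typecheck.h; needs GCC statement expressions):

    /* Evaluates to 1; the pointer comparison of the two dummies makes the
     * compiler complain at build time when x is not of the named type.
     * No code is generated for the check. */
    #define typecheck(type, x) \
    ({  type __dummy; \
        typeof(x) __dummy2; \
        (void)(&__dummy == &__dummy2); \
        1; \
    })

    int main(void)
    {
        long l = 0;
        typecheck(long, l); /* compiles quietly */
        /* typecheck(int, l); would warn: comparison of distinct pointer types */
        return 0;
    }
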
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
-#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
@@ -106,6 +105,7 @@
 #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
 #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
+#define AUDIT_SECCOMP 1326 /* Secure Computing event */
 
 #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */

--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
 	int rc = 0;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+	if (unlikely(!ab))
+		return rc;
 	audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
 			 old, from_kuid(&init_user_ns, loginuid), sessionid);
 	if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
 	}
 
 	*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+	if (unlikely(!*ab))
+		return rc;
 	audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
 			 task_tgid_vnr(current),
 			 from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
 	}
 }
 
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&audit_backlog_wait, &wait);
+
+	if (audit_backlog_limit &&
+	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+		schedule_timeout(sleep_time);
+
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
 /* Obtain an audit buffer. This routine does locking to obtain the
  * audit buffer, but then no locking is required for calls to
  * audit_log_*format. If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	while (audit_backlog_limit
 	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-		if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
-		    && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+		if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+			unsigned long sleep_time;
 
-			/* Wait for auditd to drain the queue a little */
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_INTERRUPTIBLE);
-			add_wait_queue(&audit_backlog_wait, &wait);
-
-			if (audit_backlog_limit &&
-			    skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
-				schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
-			__set_current_state(TASK_RUNNING);
-			remove_wait_queue(&audit_backlog_wait, &wait);
+			sleep_time = timeout_start + audit_backlog_wait_time -
+					jiffies;
+			if ((long)sleep_time > 0)
+				wait_for_auditd(sleep_time);
 			continue;
 		}
 		if (audit_rate_check() && printk_ratelimit())

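The "negative sleep durations" of the patch title come from unsigned jiffies arithmetic: once jiffies passes timeout_start + audit_backlog_wait_time, the remaining-time subtraction wraps to a huge unsigned value, and schedule_timeout() would sleep nearly forever. Hoisting the wait into wait_for_auditd() and requiring (long)sleep_time > 0 rejects the expired case. A runnable illustration of the wrap (toy values standing in for real jiffies):

    #include <stdio.h>

    int main(void)
    {
        /* The current tick is already 5 jiffies past the deadline. */
        unsigned long deadline = 1000, jiffies = 1005;
        unsigned long sleep_time = deadline - jiffies;

        printf("unsigned remaining: %lu\n", sleep_time); /* huge: wrapped */
        printf("expired: %d\n", (long)sleep_time <= 0);  /* 1: caught */
        return 0;
    }
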
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+	if (unlikely(!ab))
+		return;
+	audit_log_format(ab, "op=");
+	audit_log_string(ab, "remove rule");
+	audit_log_format(ab, " dir=");
+	audit_log_untrustedstring(ab, rule->tree->pathname);
+	audit_log_key(ab, rule->filterkey);
+	audit_log_format(ab, " list=%d res=1", rule->listnr);
+	audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
 	struct audit_krule *rule, *next;
 	struct audit_entry *entry;
-	struct audit_buffer *ab;
 
 	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
 		entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
 		list_del_init(&rule->rlist);
 		if (rule->tree) {
 			/* not a half-baked one */
-			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-			audit_log_format(ab, "op=");
-			audit_log_string(ab, "remove rule");
-			audit_log_format(ab, " dir=");
-			audit_log_untrustedstring(ab, rule->tree->pathname);
-			audit_log_key(ab, rule->filterkey);
-			audit_log_format(ab, " list=%d res=1", rule->listnr);
-			audit_log_end(ab);
+			audit_log_remove_rule(rule);
 			rule->tree = NULL;
 			list_del_rcu(&entry->list);
 			list_del(&entry->rule.list);

--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
 	if (audit_enabled) {
 		struct audit_buffer *ab;
 		ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+		if (unlikely(!ab))
+			return;
 		audit_log_format(ab, "auid=%u ses=%u op=",
 				 from_kuid(&init_user_ns, audit_get_loginuid(current)),
 				 audit_get_sessionid(current));

--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
 			audit_log_end(ab);
 			ab = audit_log_start(context, GFP_KERNEL,
 					     AUDIT_IPC_SET_PERM);
+			if (unlikely(!ab))
+				return;
 			audit_log_format(ab,
 				"qbytes=%lx ouid=%u ogid=%u mode=%#ho",
 				context->ipc.qbytes,
 				context->ipc.perm_uid,
 				context->ipc.perm_gid,
 				context->ipc.perm_mode);
-			if (!ab)
-				return;
 		}
 		break; }
 	case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
 	context->type = AUDIT_MMAP;
 }
 
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
 {
 	kuid_t auid, uid;
 	kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
 	audit_log_task_context(ab);
 	audit_log_format(ab, " pid=%d comm=", current->pid);
 	audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+	audit_log_task(ab);
 	audit_log_format(ab, " reason=");
 	audit_log_string(ab, reason);
 	audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
 		return;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+	if (unlikely(!ab))
+		return;
 	audit_log_abend(ab, "memory violation", signr);
 	audit_log_end(ab);
 }
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
 {
 	struct audit_buffer *ab;
 
-	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-	audit_log_abend(ab, "seccomp", signr);
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+	if (unlikely(!ab))
+		return;
+	audit_log_task(ab);
+	audit_log_format(ab, " sig=%ld", signr);
 	audit_log_format(ab, " syscall=%ld", syscall);
 	audit_log_format(ab, " compat=%d", is_compat_task());
 	audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));

--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 
 EXPORT_SYMBOL(down_read_nested);
 
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+	might_sleep();
+	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	might_sleep();

--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);

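The lifecycle after this change: alloc_cpu_rmap() starts the refcount at 1 for the creator, irq_cpu_rmap_add() takes one reference per glue, and irq_cpu_rmap_release() drops that reference only when the IRQ core has truly finished with the notifier. free_irq_cpu_rmap() then merely drops the creator's reference, so the kfree() happens with whichever put runs last; the old flush_scheduled_work()-based irq_run_affinity_notifiers() barrier is no longer needed. A minimal userspace analogue of the kref pattern, assuming C11 atomics in place of struct kref (obj_alloc, obj_get and obj_put are illustrative names):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcount;
        /* payload ... */
    };

    static struct obj *obj_alloc(void)
    {
        struct obj *o = malloc(sizeof(*o));
        if (o)
            atomic_init(&o->refcount, 1); /* creator holds the first ref */
        return o;
    }

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->refcount, 1); /* one ref per outstanding user */
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            free(o); /* whoever drops the last ref reclaims */
    }

    int main(void)
    {
        struct obj *o = obj_alloc();
        if (!o)
            return 1;
        obj_get(o); /* a second user appears */
        obj_put(o); /* first put: object survives */
        obj_put(o); /* last put: object freed */
        return 0;
    }
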
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 	}
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 		}
 	}
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+	____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	rb_erase_augmented(node, root, &dummy_callbacks);
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);

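A non-static __always_inline is self-contradictory: EXPORT_SYMBOL needs an out-of-line symbol, while the attribute demands inlining at every call site, and some gcc versions reject or mishandle the combination. The fix keeps a static always-inline worker, so rb_erase() can still fold away the dummy_rotate no-op callback, and layers a plain exported wrapper on top. The shape of that pattern as a standalone sketch (all names illustrative):

    /* Static worker: always inlined, so a constant callback argument can
     * be folded away at each call site. */
    static inline __attribute__((__always_inline__))
    void erase_worker(int *node, void (*rotate)(int *))
    {
        rotate(node);
    }

    static void dummy_rotate(int *node) { (void)node; }

    /* Out-of-line wrapper: an ordinary linkable symbol for external
     * callers, replacing the old non-static __always_inline definition. */
    void erase(int *node, void (*rotate)(int *))
    {
        erase_worker(node, rotate);
    }

    /* Hot internal path: erase_worker() is inlined here and the
     * dummy_rotate call can be eliminated entirely. */
    void erase_fast(int *node)
    {
        erase_worker(node, dummy_rotate);
    }
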
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
 	while (start < end) {
 		unsigned long *map, idx, vec;
+		unsigned shift;
 
 		map = bdata->node_bootmem_map;
 		idx = start - bdata->node_min_pfn;
+		shift = idx & (BITS_PER_LONG - 1);
+		/*
+		 * vec holds at most BITS_PER_LONG map bits,
+		 * bit 0 corresponds to start.
+		 */
 		vec = ~map[idx / BITS_PER_LONG];
+
+		if (shift) {
+			vec >>= shift;
+			if (end - start >= BITS_PER_LONG)
+				vec |= ~map[idx / BITS_PER_LONG + 1] <<
+					(BITS_PER_LONG - shift);
+		}
 		/*
 		 * If we have a properly aligned and fully unreserved
 		 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 			count += BITS_PER_LONG;
 			start += BITS_PER_LONG;
 		} else {
-			unsigned long off = 0;
+			unsigned long cur = start;
 
-			vec >>= start & (BITS_PER_LONG - 1);
-			while (vec) {
+			start = ALIGN(start + 1, BITS_PER_LONG);
+			while (vec && cur != start) {
 				if (vec & 1) {
-					page = pfn_to_page(start + off);
+					page = pfn_to_page(cur);
 					__free_pages_bootmem(page, 0);
 					count++;
 				}
 				vec >>= 1;
-				off++;
+				++cur;
 			}
-			start = ALIGN(start + 1, BITS_PER_LONG);
 		}
 	}

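The underlying bug: vec's bit 0 corresponds to idx = start - node_min_pfn within its bitmap word, but the old code shifted by start's misalignment instead, which differs whenever node_min_pfn itself is not BITS_PER_LONG-aligned, so the wrong pages could be freed. The fix shifts by idx's misalignment and tops up the high bits from the following word. A runnable sketch of that windowed bitmap read (window() is an illustrative name; the kernel additionally guards the second word read with end - start >= BITS_PER_LONG):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    /* Read a BITS_PER_LONG-wide window of free bits (~map) starting at
     * absolute bit position pos, which need not be word-aligned; this is
     * the same windowing the fixed loop performs with idx and shift. */
    unsigned long window(const unsigned long *map, unsigned long pos)
    {
        unsigned long idx = pos / BITS_PER_LONG;
        unsigned int shift = pos % BITS_PER_LONG;
        unsigned long vec = ~map[idx];

        if (shift)
            vec = (vec >> shift) |
                  (~map[idx + 1] << (BITS_PER_LONG - shift));
        return vec;
    }

    int main(void)
    {
        /* All pages reserved except the one at absolute bit BITS_PER_LONG. */
        unsigned long map[2] = { ~0UL, ~1UL };

        /* Window starting at bit 1: the free page shows up at window bit
         * BITS_PER_LONG - 1, i.e. only the top bit of the result is set. */
        printf("%lx\n", window(map, 1));
        return 0;
    }
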
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1144,7 +1144,7 @@ static int compact_node(int nid)
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
 	int nid;
 
@@ -1153,8 +1153,6 @@ static int compact_nodes(void)
 
 	for_each_online_node(nid)
 		compact_node(nid);
-
-	return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1165,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *length, loff_t *ppos)
 {
 	if (write)
-		return compact_nodes();
+		compact_nodes();
 
 	return 0;
 }

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
 	BUG_ON(!PageAnon(page));
-	anon_vma = page_lock_anon_vma_read(page);
+
+	/*
+	 * The caller does not necessarily hold an mmap_sem that would prevent
+	 * the anon_vma disappearing so we first we take a reference to it
+	 * and then lock the anon_vma for write. This is similar to
+	 * page_lock_anon_vma_read except the write lock is taken to serialise
+	 * against parallel split or collapse operations.
+	 */
+	anon_vma = page_get_anon_vma(page);
 	if (!anon_vma)
 		goto out;
+	anon_vma_lock_write(anon_vma);
+
 	ret = 0;
 	if (!PageCompound(page))
 		goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
 	BUG_ON(PageCompound(page));
 
 out_unlock:
-	page_unlock_anon_vma_read(anon_vma);
+	anon_vma_unlock(anon_vma);
+	put_anon_vma(anon_vma);
 out:
 	return ret;
 }

--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
 		}
 
 		this->size += next->size;
-		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+		/* move forward from next + 1, index of which is i + 2 */
+		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
 		type->cnt--;
 	}
 }

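The off-by-one: this sits at index i and next at i + 1; after the merge, the surviving entries to slide down start at next + 1, that is index i + 2, so only type->cnt - (i + 2) elements should move. The old count of cnt - (i + 1) copied one element past the end of the array. A runnable miniature of the corrected merge step (plain ints standing in for struct memblock_region):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int regions[4] = { 10, 20, 30, 40 };
        int cnt = 4, i = 0;          /* merge regions[1] into regions[0] */
        int *next = &regions[i + 1];

        regions[i] += *next;         /* this->size += next->size; */
        /* entries from index i + 2 slide down: cnt - (i + 2) of them;
         * the old cnt - (i + 1) read one element past the array */
        memmove(next, next + 1, (cnt - (i + 2)) * sizeof(*next));
        cnt--;

        for (int k = 0; k < cnt; k++)
            printf("%d ", regions[k]); /* prints: 30 30 40 */
        printf("\n");
        return 0;
    }
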
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	page_xchg_last_nid(new_page, page_last_nid(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
-	if (!isolated) {
+
+	/*
+	 * Failing to isolate or a GUP pin prevents migration. The expected
+	 * page count is 2. 1 for anonymous pages without a mapping and 1
+	 * for the callers pin. If the page was isolated, the page will
+	 * need to be put back on the LRU.
+	 */
+	if (!isolated || page_count(page) != 2) {
 		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 		put_page(new_page);
+		if (isolated) {
+			putback_lru_page(page);
+			isolated = 0;
+			goto out;
+		}
 		goto out_keep_locked;
 	}

--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write(&anon_vma->root->rwsem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5585,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 	pfn &= (PAGES_PER_SECTION-1);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-	pfn = pfn - zone->zone_start_pfn;
+	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }

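In the !SPARSEMEM case the pageblock flags bitmap is sized from a pageblock-aligned base, so when a zone starts in the middle of a pageblock, offsets must be measured from the rounded-down start pfn; subtracting the raw zone_start_pfn skewed every bit index. A runnable illustration (this round_down is equivalent to the kernel's power-of-two rounding for the values shown):

    #include <stdio.h>

    #define round_down(x, y) ((x) & ~((y) - 1)) /* y must be a power of two */

    int main(void)
    {
        unsigned long zone_start_pfn = 1000;  /* starts mid-pageblock */
        unsigned long pageblock_nr_pages = 512;
        unsigned long pfn = 1024;

        printf("skewed offset:  %lu\n", pfn - zone_start_pfn); /* 24 */
        printf("aligned offset: %lu\n",
               pfn - round_down(zone_start_pfn, pageblock_nr_pages)); /* 512 */
        return 0;
    }
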