Commit d46d0256 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge various fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: recalculate the preferred zoneref if the context can ignore memory policies
  mm, page_alloc: reset zonelist iterator after resetting fair zone allocation policy
  mm, oom_reaper: do not use siglock in try_oom_reaper()
  mm, page_alloc: prevent infinite loop in buffered_rmqueue()
  checkpatch: reduce git commit description style false positives
  mm/z3fold.c: avoid modifying HEADLESS page and minor cleanup
  memcg: add RCU locking around css_for_each_descendant_pre() in memcg_offline_kmem()
  mm: check the return value of lookup_page_ext for all call sites
  kdump: fix dmesg gdbmacro to work with record based printk
  mm: fix overflow in vm_map_ram()
parents 8c52b6dc e46e7b77
@@ -170,21 +170,92 @@ document trapinfo
 	address the kernel panicked.
 end
 
+define dump_log_idx
+	set $idx = $arg0
+	if ($argc > 1)
+		set $prev_flags = $arg1
+	else
+		set $prev_flags = 0
+	end
+	set $msg = ((struct printk_log *) (log_buf + $idx))
+	set $prefix = 1
+	set $newline = 1
+	set $log = log_buf + $idx + sizeof(*$msg)
+
+	# prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+	if (($prev_flags & 8) && !($msg->flags & 4))
+		set $prefix = 0
+	end
+
+	# msg->flags & LOG_CONT
+	if ($msg->flags & 8)
+		# (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+		if (($prev_flags & 8) && !($prev_flags & 2))
+			set $prefix = 0
+		end
+		# (!(msg->flags & LOG_NEWLINE))
+		if (!($msg->flags & 2))
+			set $newline = 0
+		end
+	end
+
+	if ($prefix)
+		printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+	end
+	if ($msg->text_len != 0)
+		eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+	end
+	if ($newline)
+		printf "\n"
+	end
+	if ($msg->dict_len > 0)
+		set $dict = $log + $msg->text_len
+		set $idx = 0
+		set $line = 1
+		while ($idx < $msg->dict_len)
+			if ($line)
+				printf " "
+				set $line = 0
+			end
+			set $c = $dict[$idx]
+			if ($c == '\0')
+				printf "\n"
+				set $line = 1
+			else
+				if ($c < ' ' || $c >= 127 || $c == '\\')
+					printf "\\x%02x", $c
+				else
+					printf "%c", $c
+				end
+			end
+			set $idx = $idx + 1
+		end
+		printf "\n"
+	end
+end
+document dump_log_idx
+	Dump a single log record given its index in the log buffer. The
+	first parameter is the index into log_buf; the second is optional
+	and specifies the previous record's flags, used for properly
+	formatting continued lines.
+end
 
-define dmesg
-	set $i = 0
-	set $end_idx = (log_end - 1) & (log_buf_len - 1)
-
-	while ($i < logged_chars)
-		set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
-
-		if ($idx + 100 <= $end_idx) || \
-		   ($end_idx <= $idx && $idx + 100 < log_buf_len)
-			printf "%.100s", &log_buf[$idx]
-			set $i = $i + 100
-		else
-			printf "%c", log_buf[$idx]
-			set $i = $i + 1
-		end
-	end
-end
+define dmesg
+	set $i = log_first_idx
+	set $end_idx = log_first_idx
+	set $prev_flags = 0
+
+	while (1)
+		set $msg = ((struct printk_log *) (log_buf + $i))
+		if ($msg->len == 0)
+			set $i = 0
+		else
+			dump_log_idx $i $prev_flags
+			set $i = $i + $msg->len
+			set $prev_flags = $msg->flags
+		end
+		if ($i == $end_idx)
+			loop_break
+		end
+	end
+end
...
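The magic numbers in dump_log_idx are printk record flags: 2 is LOG_NEWLINE, 4 is LOG_PREFIX and 8 is LOG_CONT, matching the comments in the macro. For orientation, a sketch of the record header the macro walks, as kernel/printk/printk.c defined it in this era (illustrative, not quoted verbatim):

	struct printk_log {
		u64 ts_nsec;	/* timestamp in nanoseconds */
		u16 len;	/* length of entire record */
		u16 text_len;	/* length of text buffer */
		u16 dict_len;	/* length of dictionary buffer */
		u8 facility;	/* syslog facility */
		u8 flags:5;	/* internal record flags */
		u8 level:3;	/* syslog level */
	};

Records sit back to back in log_buf, and a record with len == 0 means "wrap to the start of the buffer", which is how the new dmesg loop detects the wrap point. Typical use is against a crash image: gdb vmlinux vmcore, source the macro file, then run dmesg.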
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
...
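This and the page_ext hunks below all plug the same hole: lookup_page_ext() can return NULL. The page_ext arrays are allocated late during boot (per memory section with sparsemem), so a page can reach these helpers before its extension exists, or after the allocation has failed. A rough sketch of the sparsemem lookup path under those assumptions (not the verbatim source):

	struct page_ext *lookup_page_ext(struct page *page)
	{
		unsigned long pfn = page_to_pfn(page);
		struct mem_section *section = __pfn_to_section(pfn);

		if (!section->page_ext)	/* not allocated (yet) */
			return NULL;
		return section->page_ext + pfn;
	}

Every caller therefore has to treat NULL as "no tracking data for this page" and bail out quietly, which is exactly what the added checks do.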
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	 * ordering is imposed by list_lru_node->lock taken by
 	 * memcg_drain_all_list_lrus().
 	 */
+	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
 	css_for_each_descendant_pre(css, &memcg->css) {
 		child = mem_cgroup_from_css(css);
 		BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 		if (!memcg->use_hierarchy)
 			break;
 	}
+	rcu_read_unlock();
+
 	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
 	memcg_free_cache_id(kmemcg_id);
...
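css_for_each_descendant_pre() requires that the caller hold either cgroup_mutex or an RCU read lock for the walk to be safe, and memcg_offline_kmem() can be reached from the css_free path without cgroup_mutex, hence the added RCU section. The general shape of the pattern (a sketch, not specific to this file):

	rcu_read_lock();
	css_for_each_descendant_pre(css, &root->css) {
		/*
		 * A css found here may already be offline; take a
		 * reference (css_tryget) before any sleeping work.
		 */
	}
	rcu_read_unlock();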
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk)
 	if (atomic_read(&mm->mm_users) > 1) {
 		rcu_read_lock();
 		for_each_process(p) {
-			bool exiting;
-
 			if (!process_shares_mm(p, mm))
 				continue;
 			if (fatal_signal_pending(p))
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk)
 			 * If the task is exiting make sure the whole thread group
 			 * is exiting and cannot acces mm anymore.
 			 */
-			spin_lock_irq(&p->sighand->siglock);
-			exiting = signal_group_exit(p->signal);
-			spin_unlock_irq(&p->sighand->siglock);
-			if (exiting)
+			if (signal_group_exit(p->signal))
 				continue;
 
 			/* Give up */
...
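Taking siglock bought nothing here: signal_group_exit() only reads two fields, the result is a racy heuristic either way, and p->sighand may already be NULL for a task that is far enough into exit, so dereferencing it for the lock could oops. Its definition at the time, from include/linux/sched.h:

	static inline int signal_group_exit(const struct signal_struct *sig)
	{
		return	(sig->flags & SIGNAL_GROUP_EXIT) ||
			(sig->group_exit_task != NULL);
	}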
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
-		} while (page && check_new_pcp(page));
 
 			__dec_zone_state(zone, NR_ALLOC_BATCH);
 			list_del(&page->lru);
 			pcp->count--;
+		} while (check_new_pcp(page));
 	} else {
 		/*
 		 * We most definitely don't want callers attempting to
@@ -3023,6 +3030,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		apply_fair = false;
 		fair_skipped = false;
 		reset_alloc_batches(ac->preferred_zoneref->zone);
+		z = ac->preferred_zoneref;
 		goto zonelist_scan;
 	}
 
@@ -3596,6 +3604,17 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Reset the zonelist iterators if memory policies can be ignored.
+	 * These allocations are high priority and system rather than user
+	 * orientated.
+	 */
+	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	}
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, order,
 				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3623,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Allocate without watermarks if the context allows */
 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
-		/*
-		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-		 * the allocation is high priority and these type of
-		 * allocations are system rather than user orientated
-		 */
-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
-
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
@@ -3808,7 +3821,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-	/* The preferred zone is used for statistics later */
+	/*
+	 * The preferred zone is used for statistics but crucially it is
+	 * also used as the starting point for the zonelist iterator. It
+	 * may get reset for allocations that ignore memory policies.
+	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
 	if (!ac.preferred_zoneref) {
...
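Two separate fixes meet in this file. The zonelist ones make sure that whenever the allocator switches zonelists (after resetting the fair zone batches, or when a high-priority allocation may ignore memory policies) the iterator cursor z and ac->preferred_zoneref are re-derived together, so statistics and the scan starting point stay consistent. The buffered_rmqueue() fix is easier to see in miniature: if a bad page is detected before it is unlinked from the per-cpu list, the next iteration picks the very same page again, forever; unlinking first guarantees progress. A self-contained toy model of the two orderings (plain C, hypothetical names, not kernel code):

	#include <stdio.h>
	#include <stdbool.h>

	/* Entry 0 of the toy "pcp list" is a bad page. */
	static bool bad[3] = { true, false, false };
	static int head;			/* current list head */

	static int pop_head(void)		/* "list_del" on the head */
	{
		return head++;
	}

	static int take_check_first(void)	/* buggy ordering */
	{
		int guard = 0;

		while (bad[head])		/* check before unlinking... */
			if (++guard > 5)
				return -1;	/* ...spins on the same page */
		return pop_head();
	}

	static int take_del_first(void)		/* fixed ordering */
	{
		int page;

		do {
			page = pop_head();	/* unlink first */
		} while (bad[page]);		/* bad pages are never revisited */
		return page;
	}

	int main(void)
	{
		printf("check-first: %d (loops; cut short by a guard)\n",
		       take_check_first());
		head = 0;
		printf("del-first:   %d\n", take_del_first());
		return 0;
	}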
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
 	gfp_t gfp_mask = page_ext->gfp_mask;
 	int mt = gfpflags_to_migratetype(gfp_mask);
 
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
...
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
...
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
 
 	BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-	unsigned long size = count << PAGE_SHIFT;
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr;
 	void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
 	   unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
+	unsigned long size;		/* In bytes */
 
 	might_sleep();
 
 	if (count > totalram_pages)
 		return NULL;
 
-	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-					__builtin_return_address(0));
+	size = (unsigned long)count << PAGE_SHIFT;
+	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
 	if (!area)
 		return NULL;
...
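What the vmalloc change guards against: count is unsigned int, so count << PAGE_SHIFT is evaluated in 32-bit arithmetic and wraps before being widened for the assignment; with PAGE_SHIFT == 12, any request of 2^20 pages (4GiB) or more yields a truncated size. A minimal userspace demonstration, assuming an LP64 target:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned int count = 1U << 20;	/* 1Mi pages = 4GiB */

		/* Buggy: shift happens in 32-bit unsigned int, wraps to 0. */
		unsigned long bad = count << PAGE_SHIFT;

		/* Fixed: widen first, then shift in 64-bit arithmetic. */
		unsigned long good = (unsigned long)count << PAGE_SHIFT;

		printf("bad  = %lu\ngood = %lu\n", bad, good);	/* 0 vs 4294967296 */
		return 0;
	}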
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 				continue;
 
 			page_ext = lookup_page_ext(page);
+			if (unlikely(!page_ext))
+				continue;
 
 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 				continue;
...
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
-		bud = (handle - zhdr->first_num) & BUDDY_MASK;
+		bud = handle_to_buddy(handle);
 
 		switch (bud) {
 		case FIRST:
@@ -572,15 +572,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			pool->pages_nr--;
 			spin_unlock(&pool->lock);
 			return 0;
-		} else if (zhdr->first_chunks != 0 &&
-			   zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-			/* Full, add to buddied list */
-			list_add(&zhdr->buddy, &pool->buddied);
-		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			z3fold_compact_page(zhdr);
-			/* add to unbuddied list */
-			freechunks = num_free_chunks(zhdr);
-			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+			if (zhdr->first_chunks != 0 &&
+			    zhdr->last_chunks != 0 &&
+			    zhdr->middle_chunks != 0) {
+				/* Full, add to buddied list */
+				list_add(&zhdr->buddy, &pool->buddied);
+			} else {
+				z3fold_compact_page(zhdr);
+				/* add to unbuddied list */
+				freechunks = num_free_chunks(zhdr);
+				list_add(&zhdr->buddy,
+					 &pool->unbuddied[freechunks]);
+			}
 		}
 
 		/* add to beginning of LRU */
...
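handle_to_buddy() is the existing helper for recovering the buddy number, so open-coding the mask arithmetic in z3fold_free() duplicated knowledge of the handle encoding; the reclaim hunk has the same motive, since a HEADLESS page has no meaningful z3fold_header and its first/last/middle_chunks fields must only be consulted inside the !PAGE_HEADLESS branch. Roughly, per the z3fold code of this era (a sketch), a handle is the header address plus a low-bits cookie:

	static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
	{
		unsigned long handle = (unsigned long)zhdr;

		if (bud != HEADLESS)	/* HEADLESS pages carry no cookie */
			handle += (bud + zhdr->first_num) & BUDDY_MASK;
		return handle;
	}

	static enum buddy handle_to_buddy(unsigned long handle)
	{
		struct z3fold_header *zhdr = handle_to_z3fold_header(handle);

		return (handle - zhdr->first_num) & BUDDY_MASK;
	}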
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
 		if ($in_commit_log && !$commit_log_possible_stack_dump &&
+		    $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
 		    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
 		     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
 		      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
...
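The added pattern exempts commit-log trailer lines from the commit-description check, since long hex runs in URLs and message IDs were tripping the 12-to-40 digit match. For example (a hypothetical trailer, not taken from this commit), a line such as

	Link: http://lkml.kernel.org/r/20160601123456.abcdef0123456789@example.org

contains a plausible-looking hex run and would previously draw the "Please use git commit description style" warning; lines beginning with Link:, Patchwork:, http or BugLink: are now skipped.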