Commit cc079039 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "Three fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
  mm/memory_hotplug: don't access uninitialized memmaps in shrink_zone_span()
  Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
parents a6b0373f 9a63236f
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
 	return loc->xl_ops->xlo_check_space(loc, xi);
 }
 
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+	loc->xl_ops->xlo_add_entry(loc, name_hash);
+	loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+	/*
+	 * We can't leave the new entry's xe_name_offset at zero or
+	 * add_namevalue() will go nuts.  We set it to the size of our
+	 * storage so that it can never be less than any other entry.
+	 */
+	loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
 				   struct ocfs2_xattr_info *xi)
 {
@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
 	if (rc)
 		goto out;
 
-	if (!loc->xl_entry) {
-		rc = -EINVAL;
-		goto out;
-	}
+	if (loc->xl_entry) {
+		if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+			orig_value_size = loc->xl_entry->xe_value_size;
+			rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+			if (rc)
+				goto out;
+			goto alloc_value;
+		}
 
-	if (ocfs2_xa_can_reuse_entry(loc, xi)) {
-		orig_value_size = loc->xl_entry->xe_value_size;
-		rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
-		if (rc)
-			goto out;
-		goto alloc_value;
-	}
-
-	if (!ocfs2_xattr_is_local(loc->xl_entry)) {
-		orig_clusters = ocfs2_xa_value_clusters(loc);
-		rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
-		if (rc) {
-			mlog_errno(rc);
-			ocfs2_xa_cleanup_value_truncate(loc,
-							"overwriting",
-							orig_clusters);
-			goto out;
+		if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+			orig_clusters = ocfs2_xa_value_clusters(loc);
+			rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+			if (rc) {
+				mlog_errno(rc);
+				ocfs2_xa_cleanup_value_truncate(loc,
+								"overwriting",
+								orig_clusters);
+				goto out;
+			}
 		}
-	}
-	ocfs2_xa_wipe_namevalue(loc);
+		ocfs2_xa_wipe_namevalue(loc);
+	} else
+		ocfs2_xa_add_entry(loc, name_hash);
 
 	/*
 	 * If we get here, we have a blank entry.  Fill it.  We grow our
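Note on the revert above: ocfs2_xa_prepare_entry() is legitimately reached with loc->xl_entry == NULL whenever a brand-new xattr is being created, so the reverted patch's -EINVAL turned every fresh xattr creation into a failure. A condensed sketch of the restored control flow (simplified, not the kernel code verbatim):

	/* Condensed sketch of the restored branch structure. */
	if (loc->xl_entry) {
		/* Entry exists: reuse it if the new value fits its storage,
		 * otherwise truncate any external value, then wipe the old
		 * name/value region before refilling it. */
		ocfs2_xa_wipe_namevalue(loc);
	} else {
		/* No entry yet: the normal "create" case, not an error. */
		ocfs2_xa_add_entry(loc, name_hash);
	}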
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node)
 		return 0;
 	}
 
-	if (WARN_ON_ONCE(page_mapped(page))) {
-		/*
-		 * This should not happen: but if it does, just refuse to let
-		 * merge_across_nodes be switched - there is no need to panic.
-		 */
-		err = -EBUSY;
-	} else {
+	/*
+	 * Page could be still mapped if this races with __mmput() running in
+	 * between ksm_exit() and exit_mmap(). Just refuse to let
+	 * merge_across_nodes/max_page_sharing be switched.
+	 */
+	err = -EBUSY;
+	if (!page_mapped(page)) {
 		/*
 		 * The stable node did not yet appear stale to get_ksm_page(),
 		 * since that allows for an unmapped ksm page to be recognized
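The WARN_ON_ONCE removed above fired on a legitimate race: __mmput() runs ksm_exit() before exit_mmap(), so remove_stable_node() can observe a ksm page that is still mapped without anything being broken. A minimal sketch of the new error handling (simplified shape, not the full function):

	/*
	 * Simplified timeline of the race the fix tolerates:
	 *
	 *   exiting task                      remove_stable_node()
	 *   ------------                      --------------------
	 *   __mmput()
	 *     ksm_exit()                      get_ksm_page() succeeds;
	 *                                     page_mapped() still true
	 *     exit_mmap()  <- mappings only torn down here
	 */
	err = -EBUSY;		/* pessimistic default: page may be mapped */
	if (!page_mapped(page)) {
		/* node provably unused; safe to remove from the stable tree */
		err = 0;
	}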
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 				     unsigned long end_pfn)
 {
 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(start_pfn)))
+		if (unlikely(!pfn_to_online_page(start_pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 	/* pfn is the end pfn of a memory section. */
 	pfn = end_pfn - 1;
 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 	 */
 	pfn = zone_start_pfn;
 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;
 
 		if (page_zone(pfn_to_page(pfn)) != zone)
@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;
 
+#ifdef CONFIG_ZONE_DEVICE
+	/*
+	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+	 * we will not try to shrink the zones - which is okay as
+	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+	 */
+	if (zone_idx(zone) == ZONE_DEVICE)
+		return;
+#endif
+
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
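The pfn_valid() to pfn_to_online_page() switch above is the heart of this fix: pfn_valid() only guarantees that a memmap exists for the pfn, and for sections that were hot-added but never onlined (or have since been offlined) that memmap can be uninitialized, so page_zone() or pfn_to_nid() on it reads garbage. pfn_to_online_page() additionally requires the section to be online and returns NULL otherwise. A minimal sketch of the safe pattern, using a hypothetical helper name:

/* Hypothetical helper: only dereference the memmap once
 * pfn_to_online_page() has confirmed it is initialized. */
static bool pfn_in_zone(unsigned long pfn, struct zone *zone)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page)	/* hole, or offline section with a stale memmap */
		return false;
	return page_zone(page) == zone;
}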