Commit 2e67c562 authored by Linus Torvalds

Merge branch 'akpm' (incoming from Andrew)

Merge patches from Andrew Morton:
 "Six fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib/percpu_counter.c: fix __percpu_counter_add()
  crash_dump: fix compilation error (on MIPS at least)
  mm: fix crash when using XFS on loopback
  MIPS: fix blast_icache32 on loongson2
  MIPS: fix case mismatch in local_r4k_flush_icache_range()
  nilfs2: fix segctor bug that causes file system corruption
parents 1a60864f 74e72f89
arch/mips/include/asm/cacheops.h
@@ -83,6 +83,6 @@
 /*
  * Loongson2-specific cacheops
  */
-#define Hit_Invalidate_I_Loongson23	0x00
+#define Hit_Invalidate_I_Loongson2	0x00
 
 #endif /* __ASM_CACHEOPS_H */
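
The renamed constant is the immediate operand handed to the MIPS "cache" instruction. As a hedged sketch of how such cacheop constants are consumed (simplified, not the kernel's exact macro, which also wraps the instruction in ".set push/.set mips3/.set pop" directives):

/*
 * Simplified illustration only; Hit_Invalidate_I_Loongson2 ends up in the
 * "i" (immediate) operand below.
 */
#define cache_op(op, addr)					\
	__asm__ __volatile__(					\
	"	cache	%0, %1\n"				\
	: /* no outputs */					\
	: "i" (op), "R" (*(unsigned char *)(addr)))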
arch/mips/include/asm/r4kcache.h
@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
 	__iflush_prologue
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		cache_op(Hit_Invalidate_I_Loongson23, addr);
+		cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
 		"i" (op));
 
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
-static inline void blast_##pfx##cache##lsize(void) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
+static inline void extra##blast_##pfx##cache##lsize(void) \
 { \
 	unsigned long start = INDEX_BASE; \
 	unsigned long end = start + current_cpu_data.desc.waysize; \
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \
 	__##pfx##flush_epilogue \
 } \
 \
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
 { \
 	unsigned long start = page; \
 	unsigned long end = page + PAGE_SIZE; \
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
 	__##pfx##flush_epilogue \
 } \
 \
-static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
 { \
 	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
 	unsigned long start = INDEX_BASE + (page & indexmask); \
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
 	__##pfx##flush_epilogue \
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
 
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
-	protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
+	protected_, loongson2_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
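
The functional change in this header is the new "extra" parameter of __BUILD_BLAST_CACHE: it is token-pasted onto the generated function names, so the one extra invocation emits Loongson-2 variants of the 32-byte icache helpers next to the generic ones. A hedged, hand-expanded sketch of what that invocation produces (bodies elided; illustrative, not literal preprocessor output):

/*
 * Rough expansion of
 *   __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I,
 *                       Hit_Invalidate_I_Loongson2, 32, loongson2_)
 */
static inline void loongson2_blast_icache32(void)
{
	/* walk the whole I-cache by index (Index_Invalidate_I) */
}

static inline void loongson2_blast_icache32_page(unsigned long page)
{
	/* hit-invalidate one page of lines using Hit_Invalidate_I_Loongson2 */
}

static inline void loongson2_blast_icache32_page_indexed(unsigned long page)
{
	/* indexed invalidation of the lines that can back one page */
}

c-r4k.c below installs these wherever current_cpu_type() reports CPU_LOONGSON2, in place of the generic blast_icache32 family.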
arch/mips/mm/c-r4k.c
@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
 		r4k_blast_icache_page = (void *)cache_noop;
 	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
+	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+		r4k_blast_icache_page = loongson2_blast_icache32_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
 	else if (ic_lsize == 64)
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
 	else if (TX49XX_ICACHE_INDEX_INV_WAR)
 		r4k_blast_icache_page_indexed =
 			tx49_blast_icache32_page_indexed;
+	else if (current_cpu_type() == CPU_LOONGSON2)
+		r4k_blast_icache_page_indexed =
+			loongson2_blast_icache32_page_indexed;
 	else
 		r4k_blast_icache_page_indexed =
 			blast_icache32_page_indexed;
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
 			r4k_blast_icache = blast_r4600_v1_icache32;
 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 			r4k_blast_icache = tx49_blast_icache32;
+		else if (current_cpu_type() == CPU_LOONGSON2)
+			r4k_blast_icache = loongson2_blast_icache32;
 		else
 			r4k_blast_icache = blast_icache32;
 	} else if (ic_lsize == 64)
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 	else {
 		switch (boot_cpu_type()) {
 		case CPU_LOONGSON2:
-			protected_blast_icache_range(start, end);
+			protected_loongson2_blast_icache_range(start, end);
 			break;
 
 		default:
-			protected_loongson23_blast_icache_range(start, end);
+			protected_blast_icache_range(start, end);
 			break;
 		}
 	}
fs/nilfs2/segment.c
@@ -1440,17 +1440,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 
 		nilfs_clear_logs(&sci->sc_segbufs);
 
-		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-		if (unlikely(err))
-			return err;
-
 		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
 							sci->sc_freesegs,
 							sci->sc_nfreesegs,
 							NULL);
 			WARN_ON(err); /* do not happen */
+			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
 		}
+
+		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+		if (unlikely(err))
+			return err;
+
 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
 		sci->sc_stage = prev_stage;
 	}
include/linux/crash_dump.h
@@ -6,6 +6,8 @@
 #include <linux/proc_fs.h>
 #include <linux/elf.h>
 
+#include <asm/pgtable.h> /* for pgprot_t */
+
 #define ELFCORE_ADDR_MAX	(-1ULL)
 #define ELFCORE_ADDR_ERR	(-2ULL)
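
The compilation error this cures is the usual missing-type problem: crash_dump.h names pgprot_t in a prototype, and on architectures such as MIPS nothing it previously included happens to define that type, so the header now pulls in <asm/pgtable.h> itself. As a hedged illustration (the header's exact prototype may differ slightly), the kind of declaration involved looks like:

/* Illustration only: a declaration like this is what makes the header
 * need pgprot_t, hence the new #include <asm/pgtable.h>. */
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot);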
lib/percpu_counter.c
@@ -82,10 +82,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 		unsigned long flags;
 		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
+		__this_cpu_sub(*fbc->counters, count - amount);
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
-		__this_cpu_write(*fbc->counters, 0);
 	} else {
-		__this_cpu_write(*fbc->counters, count);
+		this_cpu_add(*fbc->counters, amount);
 	}
 	preempt_enable();
 }
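
The race being fixed: preempt_disable() keeps the task on one CPU but does not mask interrupts, so an interrupt handler on the same CPU may call __percpu_counter_add() between the initial __this_cpu_read() and the later __this_cpu_write(); blindly writing 0 (or the stale count) then throws that interrupt's contribution away. A hedged sketch of the fixed function, reconstructed from the hunk above, with the reasoning annotated (comments mine, not part of the source):

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	/* An interrupt on this CPU can still modify *fbc->counters here. */
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		/*
		 * The per-CPU slot never held 'amount' (it only went into the
		 * local 'count'), so remove just the old slot value,
		 * count - amount.  A delta added by an interrupt after the
		 * read survives, instead of being wiped out by the old
		 * blind __this_cpu_write(*fbc->counters, 0).
		 */
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* One irq-safe read-modify-write instead of read + plain write. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}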
mm/util.c
@@ -390,7 +390,10 @@ struct address_space *page_mapping(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 
-	VM_BUG_ON(PageSlab(page));
+	/* This happens if someone calls flush_dcache_page on slab page */
+	if (unlikely(PageSlab(page)))
+		return NULL;
+
 	if (unlikely(PageSwapCache(page))) {
 		swp_entry_t entry;
 
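
With the VM_BUG_ON gone, page_mapping() simply reports "no address_space" for slab pages, the same answer callers already get for anonymous pages outside the swap cache; per the added comment, the crash fixed here came from flush_dcache_page() being handed such a page. A minimal, hypothetical caller sketch (the helper name is mine, not a kernel function) showing the defensive pattern this return value assumes:

/* Hypothetical helper, for illustration only. */
static inline bool page_has_mapping(struct page *page)
{
	/*
	 * NULL for slab pages (after this fix) and for anonymous pages
	 * that are not in the swap cache; a real address_space otherwise.
	 */
	return page_mapping(page) != NULL;
}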