Commit 2cb82400 authored by Chris Metcalf

arch/tile: catch up with section naming convention in 2.6.35

The convention changed to, e.g., ".data..page_aligned".  This commit
fixes the places in the tile architecture that were still using the
old convention.  One tile-specific section (.init.page) was dropped
in favor of just using an "aligned" attribute.

Sam Ravnborg <sam@ravnborg.org> pointed out __PAGE_ALIGNED_BSS, etc.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent d356b595
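
For context, the __PAGE_ALIGNED_BSS/__PAGE_ALIGNED_DATA assembly helpers and the __page_aligned_bss C attribute used in the hunks below come from <linux/linkage.h>. The sketch below is an approximation of how those helpers were defined around 2.6.35 (the exact flags and spelling may differ slightly in the real header); it shows the double-dot section names the tile code is being switched to:

/*
 * Approximate definitions from <linux/linkage.h>, circa 2.6.35.
 * Sketch only -- not copied verbatim from the header.
 */
#ifdef __ASSEMBLY__
/* Assembly form, used below in place of hand-written .section directives. */
#define __PAGE_ALIGNED_DATA	.section ".data..page_aligned", "aw"
#define __PAGE_ALIGNED_BSS	.section ".bss..page_aligned", "aw", @nobits
#else
/* C form, used below for objects such as the atomic_locks[] array. */
#define __page_aligned_data	__section(.data..page_aligned) __aligned(PAGE_SIZE)
#define __page_aligned_bss	__section(.bss..page_aligned) __aligned(PAGE_SIZE)
#endif

With these helpers, a page-aligned BSS object needs only the attribute (e.g. "int foo[N] __page_aligned_bss;") rather than an open-coded aligned/section attribute pair, which is exactly the substitution the atomic_locks hunk below makes.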
@@ -40,7 +40,7 @@
 #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES
 
 /* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 /*
  * Attribute for data that is kept read/write coherent until the end of
...
@@ -133,7 +133,7 @@ ENTRY(_start)
 	}
 	ENDPROC(_start)
 
-	.section ".bss.page_aligned","w"
+	__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE
 ENTRY(empty_zero_page)
 	.fill PAGE_SIZE,1,0
@@ -148,7 +148,7 @@ ENTRY(empty_zero_page)
 	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
 	.endm
 
-	.section ".data.page_aligned","wa"
+	__PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
 	/*
...
@@ -59,10 +59,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_sinitdata) = .;
-  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-    *(.init.page)
-  } :data =0
-  INIT_DATA_SECTION(16)
+  INIT_DATA_SECTION(16) :data =0
   PERCPU(PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;
...
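
The hand-rolled .init.page output section above can go away because the generic INIT_DATA_SECTION() macro from include/asm-generic/vmlinux.lds.h already emits the init-data output section, so the tile linker script only needs to attach its ":data =0" program-header and fill annotation to it. Roughly, in this era the macro expanded to something like the following sketch (paraphrased, not verbatim):

/* Approximate expansion of INIT_DATA_SECTION() in asm-generic/vmlinux.lds.h (sketch). */
#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}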
@@ -46,8 +46,7 @@ struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 /* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
-	__attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
+int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
...
@@ -445,7 +445,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
-	__attribute__((section(".init.page")));
+	__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
 
 /*
  * This maps the physical memory to kernel virtual address space, a total
...