Commit 14e73e78 authored by Chris Metcalf

tile: use __ro_after_init instead of tile-specific __write_once

The semantics of the old tile __write_once are the same as the
newer generic __ro_after_init, so rename them all and get rid
of the tile-specific version.

This does not enable actual support for __ro_after_init,
which had been dropped from the tile architecture before the
initial upstreaming was done, since we had at that time switched
to using 16MB huge pages to map the kernel.

Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
parent 18bfd3e6
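For context, the contract both names express is the same: the variable is written while the kernel is initializing, typically from an __init function or an early_param/__setup handler (the noallocl2 and set_initfree hunks below follow exactly this shape), and is only read once boot is complete. A minimal sketch of the pattern, assuming the generic linux/cache.h definition; the variable and parameter names are hypothetical and not part of this commit:

#include <linux/cache.h>	/* __ro_after_init */
#include <linux/init.h>		/* __init, early_param */

/* Hypothetical knob: defaults to on, can be turned off on the command line. */
static int my_feature_enabled __ro_after_init = 1;

static int __init parse_nomyfeature(char *str)
{
	/* Runs during early boot: the last legal write to the variable. */
	my_feature_enabled = 0;
	return 0;
}
early_param("nomyfeature", parse_nomyfeature);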
@@ -50,18 +50,15 @@
 
 /*
  * Originally we used small TLB pages for kernel data and grouped some
- * things together as "write once", enforcing the property at the end
+ * things together as ro-after-init, enforcing the property at the end
  * of initialization by making those pages read-only and non-coherent.
  * This allowed better cache utilization since cache inclusion did not
  * need to be maintained.  However, to do this requires an extra TLB
  * entry, which on balance is more of a performance hit than the
  * non-coherence is a performance gain, so we now just make "read
- * mostly" and "write once" be synonyms.  We keep the attribute
+ * mostly" and "ro-after-init" be synonyms.  We keep the attribute
  * separate in case we change our minds at a future date.
  */
-#define __write_once __read_mostly
-
-/* __ro_after_init is the generic name for the tile arch __write_once. */
 #define __ro_after_init __read_mostly
 
 #endif /* _ASM_TILE_CACHE_H */
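The commit message's caveat is visible in this hunk: on tile, __ro_after_init stays a plain alias for __read_mostly, so the data is only grouped for cache locality and is never actually write-protected. On architectures with real support, the generic definition instead places such variables in a dedicated section that the kernel remaps read-only when init finishes. A sketch of the two forms for comparison; the generic line is an approximation of include/linux/cache.h from this period, not part of this diff:

/* generic: collected into a section that is remapped read-only after init */
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))

/* tile (this commit): cache grouping only; writes are never faulted */
#define __ro_after_init __read_mostly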
@@ -19,9 +19,6 @@
 #include <asm-generic/sections.h>
 
-/* Write-once data is writable only till the end of initialization. */
-extern char __w1data_begin[], __w1data_end[];
-
 extern char vdso_start[], vdso_end[];
 #ifdef CONFIG_COMPAT
 extern char vdso32_start[], vdso32_end[];
 #endif
...
@@ -57,7 +57,7 @@ static int pci_probe = 1;
  * This flag tells if the platform is TILEmpower that needs
  * special configuration for the PLX switch chip.
  */
-int __write_once tile_plx_gen1;
+int __ro_after_init tile_plx_gen1;
 
 static struct pci_controller controllers[TILE_NUM_PCIE];
 static int num_controllers;
...
@@ -49,7 +49,7 @@
 static inline int ABS(int x) { return x >= 0 ? x : -x; }
 
 /* Chip information */
-char chip_model[64] __write_once;
+char chip_model[64] __ro_after_init;
 
 #ifdef CONFIG_VT
 struct screen_info screen_info;
@@ -97,17 +97,17 @@ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
 #ifdef CONFIG_HIGHMEM
 /* Map information from VAs to PAs */
 unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
-	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
+	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(pbase_map);
 
 /* Map information from PAs to VAs */
 void *vbase_map[NR_PA_HIGHBIT_VALUES]
-	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
+	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(vbase_map);
 #endif
 
 /* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
+int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
 EXPORT_SYMBOL(highbits_to_node);
 
 static unsigned int __initdata maxmem_pfn = -1U;
@@ -844,11 +844,11 @@ static void __init zone_sizes_init(void)
 
 #ifdef CONFIG_NUMA
 /* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
+struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
 EXPORT_SYMBOL(node_2_cpu_mask);
 
 /* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(cpu_2_node);
 
 /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
@@ -1269,7 +1269,7 @@ static void __init validate_va(void)
  * cpus plus any other cpus that are willing to share their cache.
  * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
  */
-struct cpumask __write_once cpu_lotar_map;
+struct cpumask __ro_after_init cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
 /*
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hash_for_home_map);
  * cache, those tiles will only appear in cpu_lotar_map, NOT in
  * cpu_cacheable_map, as they are a special case.
  */
-struct cpumask __write_once cpu_cacheable_map;
+struct cpumask __ro_after_init cpu_cacheable_map;
 EXPORT_SYMBOL(cpu_cacheable_map);
 
 static __initdata struct cpumask disabled_map;
@@ -1506,7 +1506,7 @@ void __init setup_arch(char **cmdline_p)
 /*
  * Set up per-cpu memory.
  */
-unsigned long __per_cpu_offset[NR_CPUS] __write_once;
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
 EXPORT_SYMBOL(__per_cpu_offset);
 
 static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
...
@@ -27,7 +27,7 @@
  * We write to width and height with a single store in head_NN.S,
  * so make the variable aligned to "long".
  */
-HV_Topology smp_topology __write_once __aligned(sizeof(long));
+HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
 EXPORT_SYMBOL(smp_topology);
 
 #if CHIP_HAS_IPI()
...
@@ -37,7 +37,7 @@
  */
 
 /* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __write_once;
+static cycles_t cycles_per_sec __ro_after_init;
 
 cycles_t get_clock_rate(void)
 {
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(get_cycles);
  */
 #define SCHED_CLOCK_SHIFT 10
 
-static unsigned long sched_clock_mult __write_once;
+static unsigned long sched_clock_mult __ro_after_init;
 
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
...
@@ -47,7 +47,7 @@
  * The noallocl2 option suppresses all use of the L2 cache to cache
  * locally from a remote home.
  */
-static int __write_once noallocl2;
+static int __ro_after_init noallocl2;
 static int __init set_noallocl2(char *str)
 {
 	noallocl2 = 1;
...
@@ -190,9 +190,9 @@ static void __init page_table_range_init(unsigned long start,
 
 static int __initdata ktext_hash = 1;  /* .text pages */
 static int __initdata kdata_hash = 1;  /* .data and .bss pages */
-int __write_once hash_default = 1;     /* kernel allocator pages */
+int __ro_after_init hash_default = 1;  /* kernel allocator pages */
 EXPORT_SYMBOL(hash_default);
-int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */
+int __ro_after_init kstack_hash = 1;   /* if no homecaching, use h4h */
 
 /*
  * CPUs to use to for striping the pages of kernel data.  If hash-for-home
@@ -203,7 +203,7 @@ int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
 
 static __initdata struct cpumask kdata_mask;
 static __initdata int kdata_arg_seen;
 
-int __write_once kdata_huge;           /* if no homecaching, small pages */
+int __ro_after_init kdata_huge;        /* if no homecaching, small pages */
 
 /* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
@@ -896,8 +896,8 @@ void __init pgtable_cache_init(void)
 	panic("pgtable_cache_init(): Cannot create pgd cache");
 }
 
-static long __write_once initfree = 1;
-static bool __write_once set_initfree_done;
+static long __ro_after_init initfree = 1;
+static bool __ro_after_init set_initfree_done;
 
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
...