Commit f89d416a authored by Linus Torvalds

Merge tag 'powerpc-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix for a regression introduced by our recent rework of cache
  flushing on memory hotunplug.

  Like several other arches, our VDSO clock_getres() needed a fix to
  match the semantics of posix_get_hrtimer_res().

  A fix for a boot crash on Power9 LPARs using PCI LSI interrupts.

  A commit disabling use of the trace_imc PMU (not the core PMU) on
  Power9 systems, because it can lead to checkstops, until a workaround
  is developed.

  A handful of other minor fixes.

  Thanks to: Aneesh Kumar K.V, Anju T Sudhakar, Ard Biesheuvel,
  Christophe Leroy, Cédric Le Goater, Madhavan Srinivasan, Vincenzo
  Frascino"

* tag 'powerpc-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/perf: Disable trace_imc pmu
  powerpc/powernv: Avoid re-registration of imc debugfs directory
  powerpc/pmem: Convert to EXPORT_SYMBOL_GPL
  powerpc/archrandom: fix arch_get_random_seed_int()
  powerpc: Fix vDSO clock_getres()
  powerpc/pmem: Fix kernel crash due to wrong range value usage in flush_dcache_range
  powerpc/xive: Skip ioremap() of ESB pages for LSI interrupts
  powerpc/kasan: Fix boot failure with RELOCATABLE && FSL_BOOKE
parents 3cf2890f 249fad73
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
 	unsigned long val;
 	int rc;
 
-	rc = arch_get_random_long(&val);
+	rc = arch_get_random_seed_long(&val);
 	if (rc)
 		*v = val;
...
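For context, here is the whole helper as it reads with the fix applied (the trailing return is the natural completion of the truncated hunk). The bug: the seed variant was calling plain arch_get_random_long(), which supplies no entropy on powerpc, so arch_get_random_seed_int() never produced a seed; it has to call arch_get_random_seed_long(), the variant backed by the platform's get_random_seed hook.

static inline int arch_get_random_seed_int(unsigned int *v)
{
	unsigned long val;
	int rc;

	/* use the seed-backed source, not arch_get_random_long(),
	 * which returns no entropy on powerpc */
	rc = arch_get_random_seed_long(&val);
	if (rc)
		*v = val;

	return rc;
}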
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -83,6 +83,7 @@ struct vdso_data {
 	__s64 wtom_clock_sec;			/* Wall to monotonic clock sec */
 	__s64 stamp_xtime_sec;			/* xtime secs as at tb_orig_stamp */
 	__s64 stamp_xtime_nsec;			/* xtime nsecs as at tb_orig_stamp */
+	__u32 hrtimer_res;			/* hrtimer resolution */
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
@@ -105,6 +106,7 @@ struct vdso_data {
 	__s32 stamp_xtime_sec;		/* xtime seconds as at tb_orig_stamp */
 	__s32 stamp_xtime_nsec;		/* xtime nsecs as at tb_orig_stamp */
 	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
+	__u32 hrtimer_res;		/* hrtimer resolution */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 dcache_block_size;	/* L1 d-cache block size */
 	__u32 icache_block_size;	/* L1 i-cache block size */
...
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -388,6 +388,7 @@ int main(void)
 	OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec);
 	OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
 	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
+	OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res);
 	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
 	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
 	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
@@ -413,7 +414,6 @@ int main(void)
 	DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
 	DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
 	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
-	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 
 #ifdef CONFIG_BUG
 	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
...
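These asm-offsets.c lines are the bridge between the C struct above and the assembly below: OFFSET() emits the field's offsetof() value into the generated asm-offsets.h under the name CLOCK_HRTIMER_RES, and the removed DEFINE() drops the old compile-time resolution constant. A sketch of the mechanism using the standard kbuild macros; the 0x58 value below is made up for illustration:

/* Standard kbuild pattern (include/linux/kbuild.h): the macros expand
 * into magic .ascii directives that the build post-processes into
 * #define lines in include/generated/asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res) therefore yields
 * something like
 *	#define CLOCK_HRTIMER_RES 0x58	(hypothetical offset)
 * which is what lets the vDSO assembly below do
 *	lwz r5, CLOCK_HRTIMER_RES(r3)
 */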
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -240,6 +240,9 @@ set_ivor:
 
 	bl	early_init
 
+#ifdef CONFIG_KASAN
+	bl	kasan_early_init
+#endif
 #ifdef CONFIG_RELOCATABLE
 	mr	r3,r30
 	mr	r4,r31
@@ -266,9 +269,6 @@ set_ivor:
 	/*
 	 * Decide what sort of machine this is and initialize the MMU.
 	 */
-#ifdef CONFIG_KASAN
-	bl	kasan_early_init
-#endif
 	mr	r3,r30
 	mr	r4,r31
 	bl	machine_init
...
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -960,6 +960,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->stamp_xtime_sec = xt.tv_sec;
 	vdso_data->stamp_xtime_nsec = xt.tv_nsec;
 	vdso_data->stamp_sec_fraction = frac_sec;
+	vdso_data->hrtimer_res = hrtimer_resolution;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
 }
...
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -154,12 +154,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
 	bne	cr0,99f
 
+	mflr	r12
+  .cfi_register lr,r12
+	bl	__get_datapage@local	/* get data page */
+	lwz	r5, CLOCK_HRTIMER_RES(r3)
+	mtlr	r12
 	li	r3,0
 	cmpli	cr0,r4,0
 	crclr	cr0*4+so
 	beqlr
-	lis	r5,CLOCK_REALTIME_RES@h
-	ori	r5,r5,CLOCK_REALTIME_RES@l
 	stw	r3,TSPC32_TV_SEC(r4)
 	stw	r5,TSPC32_TV_NSEC(r4)
 	blr
...
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -186,12 +186,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
 	bne	cr0,99f
 
+	mflr	r12
+  .cfi_register lr,r12
+	bl	V_LOCAL_FUNC(__get_datapage)
+	lwz	r5, CLOCK_HRTIMER_RES(r3)
+	mtlr	r12
 	li	r3,0
 	cmpldi	cr0,r4,0
 	crclr	cr0*4+so
 	beqlr
-	lis	r5,CLOCK_REALTIME_RES@h
-	ori	r5,r5,CLOCK_REALTIME_RES@l
 	std	r3,TSPC64_TV_SEC(r4)
 	std	r5,TSPC64_TV_NSEC(r4)
 	blr
...
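The 32-bit and 64-bit stubs make the same change: save the link register, fetch the vDSO data page, load the live hrtimer_res value, and restore LR, instead of materializing the old compile-time CLOCK_REALTIME_RES constant. A hypothetical C rendering of the fixed fast path; vdso_clock_getres() and syscall_fallback() are illustrative names, the latter standing in for the branch to 99f, which lies outside these hunks:

#include <time.h>

struct vdso_data_page {
	unsigned int hrtimer_res;	/* published by update_vsyscall() */
};

/* stand-in for the 99f fallback into the real syscall */
static int syscall_fallback(clockid_t clk, struct timespec *ts)
{
	return clock_getres(clk, ts);
}

static int vdso_clock_getres(const struct vdso_data_page *vd,
			     clockid_t clk, struct timespec *ts)
{
	if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
		return syscall_fallback(clk, ts);

	if (ts) {
		ts->tv_sec = 0;
		/* was: CLOCK_REALTIME_RES baked in at build time;
		 * now: whatever the kernel last stored, matching
		 * posix_get_hrtimer_res() */
		ts->tv_nsec = vd->hrtimer_res;
	}
	return 0;
}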
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -17,14 +17,14 @@ void arch_wb_cache_pmem(void *addr, size_t size)
 	unsigned long start = (unsigned long) addr;
 	flush_dcache_range(start, start + size);
 }
-EXPORT_SYMBOL(arch_wb_cache_pmem);
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
 	unsigned long start = (unsigned long) addr;
 	flush_dcache_range(start, start + size);
 }
-EXPORT_SYMBOL(arch_invalidate_pmem);
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 
 /*
  * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
...
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -121,7 +121,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
 	unsigned long i;
 
 	for (i = start; i < stop; i += chunk) {
-		flush_dcache_range(i, min(stop, start + chunk));
+		flush_dcache_range(i, min(stop, i + chunk));
 		cond_resched();
 	}
 }
...
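The fix looks trivial, but min(stop, start + chunk) is loop-invariant: on any range longer than one chunk, every iteration after the first handed flush_dcache_range() an inverted range (end below start), whose length underflows as an unsigned count and sweeps far past the buffer, which is how the memory-hotunplug crash arose. A standalone demonstration of the two bounds, with illustrative values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long start = 0, stop = 1024, chunk = 256, i;

	for (i = start; i < stop; i += chunk)
		printf("i=%4lu  buggy end=%4lu  fixed end=%4lu\n",
		       i,
		       MIN(stop, start + chunk),	/* stuck at 256 */
		       MIN(stop, i + chunk));		/* tracks i */
	return 0;
}

With the buggy bound every iteration after the first computes an end of 256 while i has already moved past it; with the fixed bound each chunk is flushed in turn and the last one is clamped to stop.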
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
 	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
 
-	/*
-	 * Return here, either because 'imc' directory already exists,
-	 * Or failed to create a new one.
-	 */
 	if (!imc_debugfs_parent)
 		return;
@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
 	}
 
 	pmu_ptr->imc_counter_mmaped = true;
-	export_imc_mode_and_cmd(node, pmu_ptr);
 	kfree(base_addr_arr);
 	kfree(chipid_arr);
 	return 0;
@@ -151,7 +146,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
  * and domain as the inputs.
  * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
  */
-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 {
 	int ret = 0;
 	struct imc_pmu *pmu_ptr;
@@ -159,28 +154,24 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 
 	/* Return for unknown domain */
 	if (domain < 0)
-		return -EINVAL;
+		return NULL;
 
 	/* memory for pmu */
 	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
 	if (!pmu_ptr)
-		return -ENOMEM;
+		return NULL;
 
 	/* Set the domain */
 	pmu_ptr->domain = domain;
 
 	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
-	if (ret) {
-		ret = -EINVAL;
+	if (ret)
 		goto free_pmu;
-	}
 
 	if (!of_property_read_u32(parent, "offset", &offset)) {
-		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
-			ret = -EINVAL;
+		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
 			goto free_pmu;
-		}
 	}
 
 	/* Function to register IMC pmu */
 	ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
 			kfree(pmu_ptr->mem_info);
 		kfree(pmu_ptr);
-		return ret;
+		return NULL;
 	}
 
-	return 0;
+	return pmu_ptr;
 
 free_pmu:
 	kfree(pmu_ptr);
-	return ret;
+	return NULL;
 }
 
 static void disable_nest_pmu_counters(void)
@@ -254,6 +245,7 @@ int get_max_nest_dev(void)
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
 	struct device_node *imc_dev = pdev->dev.of_node;
+	struct imc_pmu *pmu;
 	int pmu_count = 0, domain;
 	bool core_imc_reg = false, thread_imc_reg = false;
 	u32 type;
@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 	}
 
 	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+		pmu = NULL;
 		if (of_property_read_u32(imc_dev, "type", &type)) {
 			pr_warn("IMC Device without type property\n");
 			continue;
@@ -285,7 +278,14 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			domain = IMC_DOMAIN_THREAD;
 			break;
 		case IMC_TYPE_TRACE:
-			domain = IMC_DOMAIN_TRACE;
+			/*
+			 * FIXME. Using trace_imc events to monitor application
+			 * or KVM thread performance can cause a checkstop
+			 * (system crash).
+			 * Disable it for now.
+			 */
+			pr_info_once("IMC: disabling trace_imc PMU\n");
+			domain = -1;
 			break;
 		default:
 			pr_warn("IMC Unknown Device type \n");
@@ -293,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			break;
 		}
 
-		if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
-			if (domain == IMC_DOMAIN_NEST)
+		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
+		if (pmu != NULL) {
+			if (domain == IMC_DOMAIN_NEST) {
+				if (!imc_debugfs_parent)
+					export_imc_mode_and_cmd(imc_dev, pmu);
 				pmu_count++;
+			}
 			if (domain == IMC_DOMAIN_CORE)
 				core_imc_reg = true;
 			if (domain == IMC_DOMAIN_THREAD)
@@ -303,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 		}
 	}
 
-	/* If none of the nest units are registered, remove debugfs interface */
-	if (pmu_count == 0)
-		debugfs_remove_recursive(imc_debugfs_parent);
-
 	/* If core imc is not registered, unregister thread-imc */
 	if (!core_imc_reg && thread_imc_reg)
 		unregister_thread_imc();
...
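The thread running through this hunk: imc_pmu_create() now returns the allocated struct imc_pmu * (NULL on failure) instead of an errno, so the probe loop itself can export the nest mode/cmd debugfs files, and it does so only while the shared 'imc' directory is still unregistered. Previously the export ran inside imc_get_mem_addr_nest(), once per nest unit, re-registering the same directory each time. A condensed sketch of the resulting loop; the switch deriving domain from the "type" property and the CORE/THREAD bookkeeping are trimmed:

/* condensed sketch, not the full probe loop */
for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
	pmu = imc_pmu_create(imc_dev, pmu_count, domain);
	if (pmu && domain == IMC_DOMAIN_NEST) {
		if (!imc_debugfs_parent)	/* only the first nest unit creates "imc" */
			export_imc_mode_and_cmd(imc_dev, pmu);
		pmu_count++;
	}
}

The removed pmu_count == 0 cleanup becomes unnecessary: the directory is now only created when a nest unit actually registers, so there is nothing to tear down when none do.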
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -392,20 +392,28 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
 	data->esb_shift = esb_shift;
 	data->trig_page = trig_page;
 
+	data->hw_irq = hw_irq;
+
 	/*
 	 * No chip-id for the sPAPR backend. This has an impact how we
 	 * pick a target. See xive_pick_irq_target().
 	 */
 	data->src_chip = XIVE_INVALID_CHIP_ID;
 
+	/*
+	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
+	 * be used for interrupt management. Skip the remapping of the
+	 * ESB pages which are not available.
+	 */
+	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
+		return 0;
+
 	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
 	if (!data->eoi_mmio) {
 		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
 		return -ENOMEM;
 	}
 
-	data->hw_irq = hw_irq;
-
 	/* Full function page supports trigger */
 	if (flags & XIVE_SRC_TRIGGER) {
 		data->trig_mmio = data->eoi_mmio;
...