Commit e7270e47 authored by Linus Torvalds

Merge tag 's390-5.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

 - Fix physical vs virtual address confusion in some basic mm macros
   and routines. It currently goes unnoticed because __pa == __va on
   s390 (a short illustrative sketch follows the commit list below).

 - Get rid of on-stack cpu masks.

 - Add support for complete CPU counter set extraction.

 - Add arch_irq_work_raise implementation.

 - virtio-ccw revision and opcode fixes.

* tag 's390-5.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cpumf: Add support for complete counter set extraction
  virtio/s390: implement virtio-ccw revision 2 correctly
  s390/smp: implement arch_irq_work_raise()
  s390/topology: move cpumasks away from stack
  s390/smp: smp_emergency_stop() - move cpumask away from stack
  s390/smp: __smp_rescan_cpus() - move cpumask away from stack
  s390/smp: consolidate locking for smp_rescan()
  s390/mm: fix phys vs virt confusion in vmem_*() functions family
  s390/mm: fix phys vs virt confusion in pgtable allocation routines
  s390/mm: fix invalid __pa() usage in pfn_pXd() macros
  s390/mm: make pXd_deref() macros return a pointer
  s390/opcodes: rename selhhhr to selfhr
parents c19798af cf6acb8b
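
The mixups fixed below are easy to introduce on s390 because __pa() and
__va() are currently identity conversions. A minimal illustrative sketch
of the pitfall (not taken from this merge; all helpers are standard
kernel API):

	struct page *page = alloc_page(GFP_KERNEL);
	void *virt = page_to_virt(page);		/* kernel-virtual address */
	unsigned long phys = page_to_phys(page);	/* physical address */

	/* Wrong in principle, but "works" on s390 today because
	 * __va(phys) == phys; it breaks once the mappings diverge: */
	memset((void *)phys, 0, PAGE_SIZE);
	/* Correct on any architecture: */
	memset(virt, 0, PAGE_SIZE);
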
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_IRQ_WORK_H
#define _ASM_S390_IRQ_WORK_H

static inline bool arch_irq_work_has_interrupt(void)
{
	return true;
}

void arch_irq_work_raise(void);

#endif /* _ASM_S390_IRQ_WORK_H */
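
For context, a hedged sketch of the generic irq_work flow this header
plugs into: irq_work_queue() marks the work pending and calls
arch_irq_work_raise(), which on s390 (see the smp.c hunks below) sends
an external call to the local CPU; the handler then runs the callback
via irq_work_run(). DEFINE_IRQ_WORK() and irq_work_queue() are the
standard <linux/irq_work.h> API:

	#include <linux/irq_work.h>

	static void my_work_fn(struct irq_work *work)
	{
		/* Runs in hard-IRQ context once the external call arrives. */
	}
	static DEFINE_IRQ_WORK(my_work, my_work_fn);

	/* From NMI-like or otherwise atomic context: */
	irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
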
@@ -135,7 +135,7 @@ static inline void pmd_populate(struct mm_struct *mm,
 #define pmd_populate_kernel(mm, pmd, pte)	pmd_populate(mm, pmd, pte)
 
 #define pmd_pgtable(pmd) \
-	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
+	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
 
 /*
  * page table entry allocation/free routines.
......
@@ -1219,8 +1219,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 
-#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
-#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
+#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
 
 static inline unsigned long pmd_deref(pmd_t pmd)
 {
@@ -1229,12 +1229,12 @@ static inline unsigned long pmd_deref(pmd_t pmd)
 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
 	if (pmd_large(pmd))
 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
-	return pmd_val(pmd) & origin_mask;
+	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	return pmd_deref(pmd) >> PAGE_SHIFT;
+	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
 }
 
 static inline unsigned long pud_deref(pud_t pud)
@@ -1244,12 +1244,12 @@ static inline unsigned long pud_deref(pud_t pud)
 	origin_mask = _REGION_ENTRY_ORIGIN;
 	if (pud_large(pud))
 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
-	return pud_val(pud) & origin_mask;
+	return (unsigned long)__va(pud_val(pud) & origin_mask);
 }
 
 static inline unsigned long pud_pfn(pud_t pud)
 {
-	return pud_deref(pud) >> PAGE_SHIFT;
+	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
 }
 
 /*
@@ -1329,7 +1329,7 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 }
 #define gup_fast_permitted	gup_fast_permitted
 
-#define pfn_pte(pfn,pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
 #define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
@@ -1636,7 +1636,7 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 }
 #define pmdp_collapse_flush pmdp_collapse_flush
 
-#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
 
 static inline int pmd_trans_huge(pmd_t pmd)
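
The invariant these pgtable.h hunks restore, sketched on the assumption
that pXd entries hold physical table origins: the *_deref() helpers now
return kernel-virtual addresses, so the *_pfn() helpers convert back
with __pa() before shifting, and pfn_pte()/pfn_pmd() no longer wrap an
already-physical value in __pa():

	/* Illustrative round-trip only: */
	unsigned long virt = pmd_deref(pmd);		/* __va(origin) */
	unsigned long pfn = __pa(virt) >> PAGE_SHIFT;	/* page frame */
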
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright IBM Corp. 2021
 * Interface implementation for communication with the CPU Measurement
 * counter facility device driver.
 *
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 *
 * Define for ioctl() commands to communicate with the CPU Measurement
 * counter facility device driver.
 */
#ifndef _PERF_CPUM_CF_DIAG_H
#define _PERF_CPUM_CF_DIAG_H

#include <linux/ioctl.h>
#include <linux/types.h>

#define S390_HWCTR_DEVICE		"hwctr"
#define S390_HWCTR_START_VERSION	1

struct s390_ctrset_start {		/* Set CPUs to operate on */
	__u64 version;			/* Version of interface */
	__u64 data_bytes;		/* # of bytes required */
	__u64 cpumask_len;		/* Length of CPU mask in bytes */
	__u64 *cpumask;			/* Pointer to CPU mask */
	__u64 counter_sets;		/* Bit mask of counter sets to get */
};

struct s390_ctrset_setdata {		/* Counter set data */
	__u32 set;			/* Counter set number */
	__u32 no_cnts;			/* # of counters stored in cv[] */
	__u64 cv[0];			/* Counter values (variable length) */
};

struct s390_ctrset_cpudata {		/* Counter set data per CPU */
	__u32 cpu_nr;			/* CPU number */
	__u32 no_sets;			/* # of counters sets in data[] */
	struct s390_ctrset_setdata data[0];
};

struct s390_ctrset_read {		/* Structure to get all ctr sets */
	__u64 no_cpus;			/* Total # of CPUs data taken from */
	struct s390_ctrset_cpudata data[0];
};

#define S390_HWCTR_MAGIC	'C'	/* Random magic # for ioctls */
#define S390_HWCTR_START	_IOWR(S390_HWCTR_MAGIC, 1, struct s390_ctrset_start)
#define S390_HWCTR_STOP		_IO(S390_HWCTR_MAGIC, 2)
#define S390_HWCTR_READ		_IOWR(S390_HWCTR_MAGIC, 3, struct s390_ctrset_read)
#endif
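
A hedged userspace sketch of the intended START/READ/STOP flow against
this interface. The device node name follows S390_HWCTR_DEVICE, but the
counter-set bit value (0x02 for the basic set) and the error handling
here are assumptions, not part of this header:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "perf_cpum_cf_diag.h"	/* local copy of the header above */

	int main(void)
	{
		__u64 cpus[1] = { 1 };			/* bit 0: CPU 0 */
		struct s390_ctrset_start start = {
			.version = S390_HWCTR_START_VERSION,
			.cpumask_len = sizeof(cpus),
			.cpumask = cpus,
			.counter_sets = 0x02,		/* assumed: basic set */
		};
		struct s390_ctrset_read *buf;
		int fd, rc = EXIT_FAILURE;

		fd = open("/dev/hwctr", O_RDWR);	/* needs CAP_SYS_ADMIN */
		if (fd < 0)
			return rc;
		if (ioctl(fd, S390_HWCTR_START, &start))
			goto out;
		/* The driver reports the worst-case buffer size for READ in
		 * start.data_bytes; reads are rate-limited to one per minute. */
		buf = malloc(start.data_bytes);
		if (buf && !ioctl(fd, S390_HWCTR_READ, buf)) {
			printf("counter data from %llu CPUs\n",
			       (unsigned long long)buf->no_cpus);
			rc = EXIT_SUCCESS;
		}
		free(buf);
		ioctl(fd, S390_HWCTR_STOP);
	out:
		close(fd);
		return rc;
	}
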
@@ -2,7 +2,7 @@
 /*
  * Performance event support for s390x - CPU-measurement Counter Sets
  *
- * Copyright IBM Corp. 2019
+ * Copyright IBM Corp. 2019, 2021
  * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
  *	      Thomas Richer <tmricht@linux.ibm.com>
  */
@@ -17,6 +17,8 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/processor.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
 
 #include <asm/ctl_reg.h>
 #include <asm/irq.h>
@@ -24,15 +26,20 @@
 #include <asm/timex.h>
 #include <asm/debug.h>
 
-#define CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
+#include <asm/perf_cpum_cf_diag.h>
 
+#define CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
+#define CF_DIAG_MIN_INTERVAL	60	/* Minimum counter set read */
+					/* interval in seconds */
+
+static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
 static unsigned int cf_diag_cpu_speed;
 static debug_info_t *cf_diag_dbg;
 
 struct cf_diag_csd {			/* Counter set data per CPU */
	size_t used;			/* Bytes used in data/start */
	unsigned char start[PAGE_SIZE];	/* Counter set at event start */
	unsigned char data[PAGE_SIZE];	/* Counter set at event delete */
+	unsigned int sets;		/* # Counter set saved in data */
 };
 static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
@@ -178,18 +185,35 @@ static void cf_diag_disable(struct pmu *pmu)
 /* Number of perf events counting hardware events */
 static atomic_t cf_diag_events = ATOMIC_INIT(0);
+/* Used to avoid races in calling reserve/release_cpumf_hardware */
+static DEFINE_MUTEX(cf_diag_reserve_mutex);
 
 /* Release the PMU if event is the last perf event */
 static void cf_diag_perf_event_destroy(struct perf_event *event)
 {
 	debug_sprintf_event(cf_diag_dbg, 5,
 			    "%s event %p cpu %d cf_diag_events %d\n",
-			    __func__, event, event->cpu,
+			    __func__, event, smp_processor_id(),
 			    atomic_read(&cf_diag_events));
 	if (atomic_dec_return(&cf_diag_events) == 0)
 		__kernel_cpumcf_end();
 }
 
+static int get_authctrsets(void)
+{
+	struct cpu_cf_events *cpuhw;
+	unsigned long auth = 0;
+	enum cpumf_ctr_set i;
+
+	cpuhw = &get_cpu_var(cpu_cf_events);
+	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+			auth |= cpumf_ctr_ctl[i];
+	}
+	put_cpu_var(cpu_cf_events);
+	return auth;
+}
+
 /* Setup the event. Test for authorized counter sets and only include counter
  * sets which are authorized at the time of the setup. Including unauthorized
  * counter sets result in specification exception (and panic).
@@ -197,15 +221,12 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
-	struct cpu_cf_events *cpuhw;
-	enum cpumf_ctr_set i;
 	int err = 0;
 
 	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
 			    event, event->cpu);
 
 	event->hw.config = attr->config;
-	event->hw.config_base = 0;
 
 	/* Add all authorized counter sets to config_base. The
 	 * the hardware init function is either called per-cpu or just once
@@ -215,11 +236,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	 * Checking the authorization on any CPU is fine as the hardware
 	 * applies the same authorization settings to all CPUs.
 	 */
-	cpuhw = &get_cpu_var(cpu_cf_events);
-	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
-		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
-			event->hw.config_base |= cpumf_ctr_ctl[i];
-	put_cpu_var(cpu_cf_events);
+	event->hw.config_base = get_authctrsets();
 
 	/* No authorized counter sets, nothing to count/sample */
 	if (!event->hw.config_base) {
@@ -237,6 +254,25 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return err;
 }
 
+/* Return 0 if the CPU-measurement counter facility is currently free
+ * and an error otherwise.
+ */
+static int cf_diag_perf_event_inuse(void)
+{
+	int err = 0;
+
+	if (!atomic_inc_not_zero(&cf_diag_events)) {
+		mutex_lock(&cf_diag_reserve_mutex);
+		if (atomic_read(&cf_diag_events) == 0 &&
+		    __kernel_cpumcf_begin())
+			err = -EBUSY;
+		else
+			err = atomic_inc_return(&cf_diag_events);
+		mutex_unlock(&cf_diag_reserve_mutex);
+	}
+	return err;
+}
+
 static int cf_diag_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
@@ -264,13 +300,9 @@ static int cf_diag_event_init(struct perf_event *event)
 	}
 
 	/* Initialize for using the CPU-measurement counter facility */
-	if (atomic_inc_return(&cf_diag_events) == 1) {
-		if (__kernel_cpumcf_begin()) {
-			atomic_dec(&cf_diag_events);
-			err = -EBUSY;
-			goto out;
-		}
-	}
+	err = cf_diag_perf_event_inuse();
+	if (err < 0)
+		goto out;
 	event->destroy = cf_diag_perf_event_destroy;
 
 	err = __hw_perf_event_init(event);
@@ -599,6 +631,8 @@ static void cf_diag_del(struct perf_event *event, int flags)
 	cpuhw->flags &= ~PMU_F_IN_USE;
 }
 
+/* Default counter set events and format attribute groups */
+
 CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
 
 static struct attribute *cf_diag_events_attr[] = {
@@ -663,6 +697,452 @@ static void cf_diag_get_cpu_speed(void)
 	}
 }
 
+/* Code to create device and file I/O operations */
+static atomic_t ctrset_opencnt = ATOMIC_INIT(0);	/* Excl. access */
+
+static int cf_diag_open(struct inode *inode, struct file *file)
+{
+	int err = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (atomic_xchg(&ctrset_opencnt, 1))
+		return -EBUSY;
+
+	/* Avoid concurrent access with perf_event_open() system call */
+	mutex_lock(&cf_diag_reserve_mutex);
+	if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
+		err = -EBUSY;
+	mutex_unlock(&cf_diag_reserve_mutex);
+	if (err) {
+		atomic_set(&ctrset_opencnt, 0);
+		return err;
+	}
+	file->private_data = NULL;
+	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
+	/* nonseekable_open() never fails */
+	return nonseekable_open(inode, file);
+}
+
+/* Variables for ioctl() interface support */
+static DEFINE_MUTEX(cf_diag_ctrset_mutex);
+static struct cf_diag_ctrset {
+	unsigned long ctrset;		/* Bit mask of counter set to read */
+	cpumask_t mask;			/* CPU mask to read from */
+	time64_t lastread;		/* Epoch counter set last read */
+} cf_diag_ctrset;
+
+static void cf_diag_ctrset_clear(void)
+{
+	cpumask_clear(&cf_diag_ctrset.mask);
+	cf_diag_ctrset.ctrset = 0;
+}
+
+static void cf_diag_release_cpu(void *p)
+{
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+	debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
+			    smp_processor_id());
+	lcctl(0);		/* Reset counter sets */
+	cpuhw->state = 0;	/* Save state in CPU hardware state */
+}
+
+/* Release function is also called when application gets terminated without
+ * doing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
+ * Since only one application is allowed to open the device, simple stop all
+ * CPU counter sets.
+ */
+static int cf_diag_release(struct inode *inode, struct file *file)
+{
+	on_each_cpu(cf_diag_release_cpu, NULL, 1);
+	cf_diag_ctrset_clear();
+	atomic_set(&ctrset_opencnt, 0);
+	__kernel_cpumcf_end();
+	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
+	return 0;
+}
+
+struct cf_diag_call_on_cpu_parm {	/* Parm struct for smp_call_on_cpu */
+	unsigned int sets;		/* Counter set bit mask */
+	atomic_t cpus_ack;		/* # CPUs successfully executed func */
+};
+
+static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
+{
+	struct s390_ctrset_read __user *ctrset_read;
+	unsigned int cpu, cpus, rc;
+	void __user *uptr;
+
+	ctrset_read = (struct s390_ctrset_read __user *)arg;
+	uptr = ctrset_read->data;
+	for_each_cpu(cpu, mask) {
+		struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
+		struct s390_ctrset_cpudata __user *ctrset_cpudata;
+
+		ctrset_cpudata = uptr;
+		debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
+				    __func__, cpu, csd->used);
+		rc = put_user(cpu, &ctrset_cpudata->cpu_nr);
+		rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
+		rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
+		if (rc)
+			return -EFAULT;
+		uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
+		cond_resched();
+	}
+	cpus = cpumask_weight(mask);
+	if (put_user(cpus, &ctrset_read->no_cpus))
+		return -EFAULT;
+	debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
+			    __func__, uptr - (void __user *)ctrset_read->data);
+	return 0;
+}
+
+static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
+				  int ctrset_size, size_t room)
+{
+	size_t need = 0;
+	int rc = -1;
+
+	need = sizeof(*p) + sizeof(u64) * ctrset_size;
+	debug_sprintf_event(cf_diag_dbg, 5,
+			    "%s room %zd need %zd set %#x set_size %d\n",
+			    __func__, room, need, ctrset, ctrset_size);
+	if (need <= room) {
+		p->set = cpumf_ctr_ctl[ctrset];
+		p->no_cnts = ctrset_size;
+		rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
+		if (rc == 3)		/* Nothing stored */
+			need = 0;
+	}
+	debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
+			    need, rc);
+	return need;
+}
+
+/* Read all counter sets. Since the perf_event_open() system call with
+ * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
+ * the perf_event_open() data buffer to store the counter sets.
+ */
+static void cf_diag_cpu_read(void *parm)
+{
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
+	struct cf_diag_call_on_cpu_parm *p = parm;
+	int set, set_size;
+	size_t space;
+
+	debug_sprintf_event(cf_diag_dbg, 5,
+			    "%s new %#x flags %#x state %#llx\n",
+			    __func__, p->sets, cpuhw->flags,
+			    cpuhw->state);
+	/* No data saved yet */
+	csd->used = 0;
+	csd->sets = 0;
+	memset(csd->data, 0, sizeof(csd->data));
+
+	/* Scan the counter sets */
+	for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
+		struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;
+
+		if (!(p->sets & cpumf_ctr_ctl[set]))
+			continue;	/* Counter set not in list */
+		set_size = cf_diag_ctrset_size(set, &cpuhw->info);
+		space = sizeof(csd->data) - csd->used;
+		space = cf_diag_cpuset_read(sp, set, set_size, space);
+		if (space) {
+			csd->used += space;
+			csd->sets += 1;
+		}
+		debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
+				    __func__, sp, space);
+	}
+	debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
+			    csd->sets, csd->used);
+}
+
+static int cf_diag_all_read(unsigned long arg)
+{
+	struct cf_diag_call_on_cpu_parm p;
+	cpumask_var_t mask;
+	time64_t now;
+	int rc = 0;
+
+	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+	now = ktime_get_seconds();
+	if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
+		debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
+				    " lastread %lld\n", __func__, now,
+				    cf_diag_ctrset.lastread);
+		rc = -EAGAIN;
+		goto out;
+	} else {
+		cf_diag_ctrset.lastread = now;
+	}
+	p.sets = cf_diag_ctrset.ctrset;
+	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+	on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
+	rc = cf_diag_all_copy(arg, mask);
+out:
+	free_cpumask_var(mask);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
+	return rc;
+}
+
+/* Stop all counter sets via ioctl interface */
+static void cf_diag_ioctl_off(void *parm)
+{
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cf_diag_call_on_cpu_parm *p = parm;
+	int rc;
+
+	debug_sprintf_event(cf_diag_dbg, 5,
+			    "%s new %#x flags %#x state %#llx\n",
+			    __func__, p->sets, cpuhw->flags,
+			    cpuhw->state);
+
+	ctr_set_multiple_disable(&cpuhw->state, p->sets);
+	ctr_set_multiple_stop(&cpuhw->state, p->sets);
+	rc = lcctl(cpuhw->state);	/* Stop counter sets */
+	if (!cpuhw->state)
+		cpuhw->flags &= ~PMU_F_IN_USE;
+	debug_sprintf_event(cf_diag_dbg, 5,
+			    "%s rc %d flags %#x state %#llx\n", __func__,
+			    rc, cpuhw->flags, cpuhw->state);
+}
+
+/* Start counter sets on particular CPU */
+static void cf_diag_ioctl_on(void *parm)
+{
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	struct cf_diag_call_on_cpu_parm *p = parm;
+	int rc;
+
+	debug_sprintf_event(cf_diag_dbg, 5,
+			    "%s new %#x flags %#x state %#llx\n",
+			    __func__, p->sets, cpuhw->flags,
+			    cpuhw->state);
+
+	if (!(cpuhw->flags & PMU_F_IN_USE))
+		cpuhw->state = 0;
+	cpuhw->flags |= PMU_F_IN_USE;
+	rc = lcctl(cpuhw->state);	/* Reset unused counter sets */
+	ctr_set_multiple_enable(&cpuhw->state, p->sets);
+	ctr_set_multiple_start(&cpuhw->state, p->sets);
+	rc |= lcctl(cpuhw->state);	/* Start counter sets */
+	if (!rc)
+		atomic_inc(&p->cpus_ack);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
+			    __func__, rc, cpuhw->state);
+}
+
+static int cf_diag_all_stop(void)
+{
+	struct cf_diag_call_on_cpu_parm p = {
+		.sets = cf_diag_ctrset.ctrset,
+	};
+	cpumask_var_t mask;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+	on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
+	free_cpumask_var(mask);
+	return 0;
+}
+
+static int cf_diag_all_start(void)
+{
+	struct cf_diag_call_on_cpu_parm p = {
+		.sets = cf_diag_ctrset.ctrset,
+		.cpus_ack = ATOMIC_INIT(0),
+	};
+	cpumask_var_t mask;
+	int rc = 0;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
+	on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
+	if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
+		on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
+		rc = -EIO;
+	}
+	free_cpumask_var(mask);
+	return rc;
+}
+
+/* Return the maximum required space for all possible CPUs in case one
+ * CPU will be onlined during the START, READ, STOP cycles.
+ * To find out the size of the counter sets, any one CPU will do. They
+ * all have the same counter sets.
+ */
+static size_t cf_diag_needspace(unsigned int sets)
+{
+	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+	size_t bytes = 0;
+	int i;
+
+	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
+		if (!(sets & cpumf_ctr_ctl[i]))
+			continue;
+		bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+			 sizeof(((struct s390_ctrset_setdata *)0)->set) +
+			 sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
+	}
+	bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
+		(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
+		 sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
+	debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
+			    bytes);
+	return bytes;
+}
+
+static long cf_diag_ioctl_read(unsigned long arg)
+{
+	struct s390_ctrset_read read;
+	int ret = 0;
+
+	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+	if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
+		return -EFAULT;
+	ret = cf_diag_all_read(arg);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
+	return ret;
+}
+
+static long cf_diag_ioctl_stop(void)
+{
+	int ret;
+
+	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
+	ret = cf_diag_all_stop();
+	cf_diag_ctrset_clear();
+	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
+	return ret;
+}
+
+static long cf_diag_ioctl_start(unsigned long arg)
+{
+	struct s390_ctrset_start __user *ustart;
+	struct s390_ctrset_start start;
+	void __user *umask;
+	unsigned int len;
+	int ret = 0;
+	size_t need;
+
+	if (cf_diag_ctrset.ctrset)
+		return -EBUSY;
+	ustart = (struct s390_ctrset_start __user *)arg;
+	if (copy_from_user(&start, ustart, sizeof(start)))
+		return -EFAULT;
+	if (start.version != S390_HWCTR_START_VERSION)
+		return -EINVAL;
+	if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
+				   cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
+				   cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
+				   cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
+				   cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
+		return -EINVAL;		/* Invalid counter set */
+	if (!start.counter_sets)
+		return -EINVAL;		/* No counter set at all? */
+	cpumask_clear(&cf_diag_ctrset.mask);
+	len = min_t(u64, start.cpumask_len, cpumask_size());
+	umask = (void __user *)start.cpumask;
+	if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
+		return -EFAULT;
+	if (cpumask_empty(&cf_diag_ctrset.mask))
+		return -EINVAL;
+	need = cf_diag_needspace(start.counter_sets);
+	if (put_user(need, &ustart->data_bytes))
+		ret = -EFAULT;
+	if (ret)
+		goto out;
+	cf_diag_ctrset.ctrset = start.counter_sets;
+	ret = cf_diag_all_start();
+out:
+	if (ret)
+		cf_diag_ctrset_clear();
+	debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
+			    __func__, cf_diag_ctrset.ctrset, need, ret);
+	return ret;
+}
+
+static long cf_diag_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	int ret;
+
+	debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
+			    cmd, arg);
+	get_online_cpus();
+	mutex_lock(&cf_diag_ctrset_mutex);
+	switch (cmd) {
+	case S390_HWCTR_START:
+		ret = cf_diag_ioctl_start(arg);
+		break;
+	case S390_HWCTR_STOP:
+		ret = cf_diag_ioctl_stop();
+		break;
+	case S390_HWCTR_READ:
+		ret = cf_diag_ioctl_read(arg);
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	mutex_unlock(&cf_diag_ctrset_mutex);
+	put_online_cpus();
+	debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
+	return ret;
+}
+
+static const struct file_operations cf_diag_fops = {
+	.owner = THIS_MODULE,
+	.open = cf_diag_open,
+	.release = cf_diag_release,
+	.unlocked_ioctl = cf_diag_ioctl,
+	.compat_ioctl = cf_diag_ioctl,
+	.llseek = no_llseek
+};
+
+static struct miscdevice cf_diag_dev = {
+	.name	= S390_HWCTR_DEVICE,
+	.minor	= MISC_DYNAMIC_MINOR,
+	.fops	= &cf_diag_fops,
+};
+
+static int cf_diag_online_cpu(unsigned int cpu)
+{
+	struct cf_diag_call_on_cpu_parm p;
+
+	mutex_lock(&cf_diag_ctrset_mutex);
+	if (!cf_diag_ctrset.ctrset)
+		goto out;
+	p.sets = cf_diag_ctrset.ctrset;
+	cf_diag_ioctl_on(&p);
+out:
+	mutex_unlock(&cf_diag_ctrset_mutex);
+	return 0;
+}
+
+static int cf_diag_offline_cpu(unsigned int cpu)
+{
+	struct cf_diag_call_on_cpu_parm p;
+
+	mutex_lock(&cf_diag_ctrset_mutex);
+	if (!cf_diag_ctrset.ctrset)
+		goto out;
+	p.sets = cf_diag_ctrset.ctrset;
+	cf_diag_ioctl_off(&p);
+out:
+	mutex_unlock(&cf_diag_ctrset_mutex);
+	return 0;
+}
+
 /* Initialize the counter set PMU to generate complete counter set data as
  * event raw data. This relies on the CPU Measurement Counter Facility device
  * already being loaded and initialized.
@@ -685,21 +1165,43 @@ static int __init cf_diag_init(void)
 		return -ENOMEM;
 	}
 
+	rc = misc_register(&cf_diag_dev);
+	if (rc) {
+		pr_err("Registration of /dev/" S390_HWCTR_DEVICE
+		       "failed rc=%d\n", rc);
+		goto out;
+	}
+
 	/* Setup s390dbf facility */
 	cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
 	if (!cf_diag_dbg) {
 		pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out_dbf;
 	}
 	debug_register_view(cf_diag_dbg, &debug_sprintf_view);
 
 	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
 	if (rc) {
-		debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
-		debug_unregister(cf_diag_dbg);
 		pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
 		       rc);
+		goto out_perf;
 	}
+	rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
+				       "perf/s390/cfd:online",
+				       cf_diag_online_cpu, cf_diag_offline_cpu);
+	if (!rc)
+		goto out;
+
+	pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
+	       rc);
+	perf_pmu_unregister(&cf_diag);
+out_perf:
+	debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
+	debug_unregister(cf_diag_dbg);
out_dbf:
+	misc_deregister(&cf_diag_dev);
out:
 	return rc;
 }
-arch_initcall(cf_diag_init);
+device_initcall(cf_diag_init);
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/sched/hotplug.h>
@@ -62,6 +63,7 @@ enum {
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
+	ec_irq_work,
 };
 
 enum {
@@ -434,10 +436,12 @@ void notrace smp_yield_cpu(int cpu)
  */
 void notrace smp_emergency_stop(void)
 {
-	cpumask_t cpumask;
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+	static cpumask_t cpumask;
	u64 end;
	int cpu;
 
+	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
@@ -458,6 +462,7 @@ void notrace smp_emergency_stop(void)
			break;
		cpu_relax();
	}
+	arch_spin_unlock(&lock);
 }
 NOKPROBE_SYMBOL(smp_emergency_stop);
 
@@ -505,6 +510,8 @@ static void smp_handle_ext_call(void)
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		__s390_handle_mcck();
+	if (test_bit(ec_irq_work, &bits))
+		irq_work_run();
 }
 
 static void do_ext_call_interrupt(struct ext_code ext_code,
@@ -537,6 +544,13 @@ void smp_send_reschedule(int cpu)
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 }
 
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
+}
+#endif
+
 /*
  * parameter area for the set/clear control bit callbacks
  */
@@ -775,11 +789,13 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
 {
	struct sclp_core_entry *core;
-	cpumask_t avail;
+	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;
 
+	get_online_cpus();
+	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
@@ -800,6 +816,8 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	put_online_cpus();
	return nr;
 }
 
@@ -847,9 +865,7 @@ void __init smp_detect_cpus(void)
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 
	/* Add CPUs present at boot */
-	get_online_cpus();
	__smp_rescan_cpus(info, true);
-	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
@@ -1178,11 +1194,7 @@ int __ref smp_rescan_cpus(void)
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
-	get_online_cpus();
-	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, false);
-	mutex_unlock(&smp_cpu_state_mutex);
-	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
......
@@ -62,16 +62,16 @@ static struct mask_info drawer_info;
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
-static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
+static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
 {
-	cpumask_t mask;
+	static cpumask_t mask;
 
	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
-				mask = info->mask;
+				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
@@ -89,23 +89,24 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
		break;
	}
	cpumask_and(&mask, &mask, cpu_online_mask);
-	return mask;
+	cpumask_copy(dst, &mask);
 }
 
-static cpumask_t cpu_thread_map(unsigned int cpu)
+static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
 {
-	cpumask_t mask;
+	static cpumask_t mask;
	int i;
 
	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
-		return mask;
+		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	cpumask_and(&mask, &mask, cpu_online_mask);
-	return mask;
+out:
+	cpumask_copy(dst, &mask);
 }
 
 #define TOPOLOGY_CORE_BITS 64
@@ -250,10 +251,10 @@ void update_cpu_masks(void)
 
	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
-		topo->thread_mask = cpu_thread_map(cpu);
-		topo->core_mask = cpu_group_map(&socket_info, cpu);
-		topo->book_mask = cpu_group_map(&book_info, cpu);
-		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
+		cpu_thread_map(&topo->thread_mask, cpu);
+		cpu_group_map(&topo->core_mask, &socket_info, cpu);
+		cpu_group_map(&topo->book_mask, &book_info, cpu);
+		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
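
Two patterns recur in the smp.c and topology.c hunks above for getting
NR_CPUS-sized masks off the stack: a static buffer serialized by a lock
(for paths that must not sleep or allocate), and a heap-backed
cpumask_var_t where GFP_KERNEL allocation is fine, as cf_diag_all_read()
uses earlier in this merge. A condensed sketch of the latter (the
function is illustrative, the cpumask API is standard):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/printk.h>

	static int walk_online_subset(const cpumask_t *wanted)
	{
		cpumask_var_t mask;	/* pointer-sized with CONFIG_CPUMASK_OFFSTACK=y */
		unsigned int cpu;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_and(mask, wanted, cpu_online_mask);
		for_each_cpu(cpu, mask)
			pr_info("operating on CPU %u\n", cpu);
		free_cpumask_var(mask);
		return 0;
	}
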
......
@@ -58,7 +58,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
-	return (unsigned long *) page_to_phys(page);
+	return (unsigned long *) page_to_virt(page);
 }
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
@@ -161,7 +161,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
	page = alloc_page(GFP_KERNEL);
	if (page) {
-		table = (u64 *)page_to_phys(page);
+		table = (u64 *)page_to_virt(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
@@ -194,7 +194,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
-				table = (unsigned long *) page_to_phys(page);
+				table = (unsigned long *) page_to_virt(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
@@ -217,7 +217,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
-	table = (unsigned long *) page_to_phys(page);
+	table = (unsigned long *) page_to_virt(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
@@ -239,10 +239,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
	struct page *page;
	unsigned int bit, mask;
 
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	page = virt_to_page(table);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
-		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
+		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
@@ -269,14 +269,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
	unsigned int bit, mask;
 
	mm = tlb->mm;
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	page = virt_to_page(table);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
-		table = (unsigned long *) (__pa(table) | 3);
+		table = (unsigned long *) ((unsigned long)table | 3);
		tlb_remove_table(tlb, table);
		return;
	}
-	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
+	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
@@ -285,7 +285,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
-	table = (unsigned long *) (__pa(table) | (1U << bit));
+	table = (unsigned long *) ((unsigned long) table | (1U << bit));
	tlb_remove_table(tlb, table);
 }
 
@@ -293,7 +293,7 @@ void __tlb_remove_table(void *_table)
 {
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
-	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	struct page *page = virt_to_page(table);
 
	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
......
@@ -27,14 +27,14 @@ static void __ref *vmem_alloc_pages(unsigned int order)
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return (void *) memblock_phys_alloc(size, size);
+	return memblock_alloc(size, size);
 }
 
 static void vmem_free_pages(unsigned long addr, int order)
 {
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
-	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
+	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
 }
@@ -57,7 +57,7 @@ pte_t __ref *vmem_pte_alloc(void)
	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
-		pte = (pte_t *) memblock_phys_alloc(size, size);
+		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -85,7 +85,7 @@ static void vmemmap_flush_unused_sub_pmd(void)
 {
	if (!unused_sub_pmd_start)
		return;
-	memset(__va(unused_sub_pmd_start), PAGE_UNUSED,
+	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
 }
@@ -98,7 +98,7 @@ static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
-	memset(__va(start), 0, sizeof(struct page));
+	memset((void *)start, 0, sizeof(struct page));
 }
 
 static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
@@ -119,7 +119,7 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
	vmemmap_flush_unused_sub_pmd();
 
@@ -128,7 +128,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
-		memset(page, PAGE_UNUSED, start - __pa(page));
+		memset((void *)page, PAGE_UNUSED, start - page);
 
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
@@ -141,11 +141,11 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 /* Returns true if the PMD is completely unused and can be freed. */
 static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
	vmemmap_flush_unused_sub_pmd();
-	memset(__va(start), PAGE_UNUSED, end - start);
-	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
+	memset((void *)start, PAGE_UNUSED, end - start);
+	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
 }
 
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
@@ -166,7 +166,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
			if (pte_none(*pte))
				continue;
			if (!direct)
-				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
+				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
@@ -176,7 +176,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
-				pte_val(*pte) = addr | prot;
+				pte_val(*pte) = __pa(addr) | prot;
			}
		} else {
			continue;
@@ -201,7 +201,7 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
		if (!pte_none(*pte))
			return;
	}
-	vmem_pte_free(__va(pmd_deref(*pmd)));
+	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
 }
 
@@ -242,7 +242,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
-				pmd_val(*pmd) = addr | prot;
+				pmd_val(*pmd) = __pa(addr) | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -338,7 +338,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
-				pud_val(*pud) = addr | prot;
+				pud_val(*pud) = __pa(addr) | prot;
				pages++;
				continue;
			}
......
@@ -597,7 +597,7 @@ b9b3	cu42	RRE_RR
 b9bd	trtre	RRF_U0RR
 b9be	srstu	RRE_RR
 b9bf	trte	RRF_U0RR
-b9c0	selhhhr	RRF_RURR
+b9c0	selfhr	RRF_RURR
 b9c8	ahhhr	RRF_R0RR2
 b9c9	shhhr	RRF_R0RR2
 b9ca	alhhhr	RRF_R0RR2
......
@@ -117,7 +117,7 @@ struct virtio_rev_info {
 };
 
 /* the highest virtio-ccw revision we support */
-#define VIRTIO_CCW_REV_MAX 1
+#define VIRTIO_CCW_REV_MAX 2
 
 struct virtio_ccw_vq_info {
	struct virtqueue *vq;
@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
 
-	if (vcdev->revision < 1)
+	if (vcdev->revision < 2)
		return vcdev->dma_area->status;
 
	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
......
@@ -168,6 +168,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_X86_CQM_ONLINE,
	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
	CPUHP_AP_PERF_S390_CF_ONLINE,
+	CPUHP_AP_PERF_S390_CFD_ONLINE,
	CPUHP_AP_PERF_S390_SF_ONLINE,
	CPUHP_AP_PERF_ARM_CCI_ONLINE,
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
......