Commit 19be9e8a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux

Pull powerpc updates from Michael Ellerman:
 "There's some bug fixes or cleanups to facilitate fixes, a MAINTAINERS
  update, and a new syscall (bpf)"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc/numa: ensure per-cpu NUMA mappings are correct on topology update
  powerpc/numa: use cached value of update->cpu in update_cpu_topology
  cxl: Fix PSL error due to duplicate segment table entries
  powerpc/mm: Use appropriate ESID mask in copro_calculate_slb()
  cxl: Refactor cxl_load_segment() and find_free_sste()
  cxl: Disable secondary hash in segment table
  Revert "powerpc/powernv: Fix endian bug in LPC bus debugfs accessors"
  powernv: Use _GLOBAL_TOC for opal wrappers
  powerpc: Wire up sys_bpf() syscall
  MAINTAINERS: nx-842 driver maintainer change
  powerpc/mm: Remove redundant #if case
  powerpc/mm: Fix build error with hugetlfs disabled
parents 9f76628d 2c0a33f9
...@@ -4608,7 +4608,7 @@ S: Supported ...@@ -4608,7 +4608,7 @@ S: Supported
F: drivers/crypto/nx/ F: drivers/crypto/nx/
IBM Power 842 compression accelerator IBM Power 842 compression accelerator
M: Nathan Fontenot <nfont@linux.vnet.ibm.com> M: Dan Streetman <ddstreet@us.ibm.com>
S: Supported S: Supported
F: drivers/crypto/nx/nx-842.c F: drivers/crypto/nx/nx-842.c
F: include/linux/nx842.h F: include/linux/nx842.h
......
...@@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm, ...@@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
void flush_dcache_icache_hugepage(struct page *page); void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT) #if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len); unsigned long len);
#else #else
......
...@@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2) ...@@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2)
SYSCALL_SPU(seccomp) SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom) SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create) SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h> #include <uapi/asm/unistd.h>
#define __NR_syscalls 361 #define __NR_syscalls 362
#define __NR__exit __NR_exit #define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls #define NR_syscalls __NR_syscalls
......
...@@ -383,5 +383,6 @@ ...@@ -383,5 +383,6 @@
#define __NR_seccomp 358 #define __NR_seccomp 358
#define __NR_getrandom 359 #define __NR_getrandom 359
#define __NR_memfd_create 360 #define __NR_memfd_create 360
#define __NR_bpf 361
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
...@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) ...@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
u64 vsid; u64 vsid;
int psize, ssize; int psize, ssize;
slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
switch (REGION_ID(ea)) { switch (REGION_ID(ea)) {
case USER_REGION_ID: case USER_REGION_ID:
pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
...@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) ...@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
vsid |= mmu_psize_defs[psize].sllp | vsid |= mmu_psize_defs[psize].sllp |
((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
slb->vsid = vsid; slb->vsid = vsid;
return 0; return 0;
......
...@@ -1509,11 +1509,14 @@ static int update_cpu_topology(void *data) ...@@ -1509,11 +1509,14 @@ static int update_cpu_topology(void *data)
cpu = smp_processor_id(); cpu = smp_processor_id();
for (update = data; update; update = update->next) { for (update = data; update; update = update->next) {
int new_nid = update->new_nid;
if (cpu != update->cpu) if (cpu != update->cpu)
continue; continue;
unmap_cpu_from_node(update->cpu); unmap_cpu_from_node(cpu);
map_cpu_to_node(update->cpu, update->new_nid); map_cpu_to_node(cpu, new_nid);
set_cpu_numa_node(cpu, new_nid);
set_cpu_numa_mem(cpu, local_memory_node(new_nid));
vdso_getcpu_init(); vdso_getcpu_init();
} }
......
...@@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, ...@@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
slice_convert(mm, mask, psize); slice_convert(mm, mask, psize);
} }
#ifdef CONFIG_HUGETLB_PAGE
/* /*
* is_hugepage_only_range() is used by generic code to verify whether * is_hugepage_only_range() is used by generic code to verify whether
* a normal mmap mapping (non hugetlbfs) is valid on a given area. * a normal mmap mapping (non hugetlbfs) is valid on a given area.
...@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, ...@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
#endif #endif
return !slice_check_fit(mask, available); return !slice_check_fit(mask, available);
} }
#endif
...@@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, ...@@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
{ {
struct lpc_debugfs_entry *lpc = filp->private_data; struct lpc_debugfs_entry *lpc = filp->private_data;
u32 data, pos, len, todo; u32 data, pos, len, todo;
__be32 bedata;
int rc; int rc;
if (!access_ok(VERIFY_WRITE, ubuf, count)) if (!access_ok(VERIFY_WRITE, ubuf, count))
...@@ -214,10 +213,9 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, ...@@ -214,10 +213,9 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
len = 2; len = 2;
} }
rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
&bedata, len); &data, len);
if (rc) if (rc)
return -ENXIO; return -ENXIO;
data = be32_to_cpu(bedata);
switch(len) { switch(len) {
case 4: case 4:
rc = __put_user((u32)data, (u32 __user *)ubuf); rc = __put_user((u32)data, (u32 __user *)ubuf);
......
...@@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \ ...@@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \
*/ */
#define OPAL_CALL(name, token) \ #define OPAL_CALL(name, token) \
_GLOBAL(name); \ _GLOBAL_TOC(name); \
mflr r0; \ mflr r0; \
std r0,16(r1); \ std r0,16(r1); \
li r0,token; \ li r0,token; \
......
...@@ -21,60 +21,64 @@ ...@@ -21,60 +21,64 @@
#include "cxl.h" #include "cxl.h"
static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group, static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
bool sec_hash,
struct cxl_sste *secondary_group,
unsigned int *lru)
{ {
unsigned int i, entry; return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
struct cxl_sste *sste, *group = primary_group; (sste->esid_data == cpu_to_be64(slb->esid)));
}
for (i = 0; i < 2; i++) {
for (entry = 0; entry < 8; entry++) { /*
sste = group + entry; * This finds a free SSTE for the given SLB, or returns NULL if it's already in
if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) * the segment table.
return sste; */
} static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
if (!sec_hash) struct copro_slb *slb)
break; {
group = secondary_group; struct cxl_sste *primary, *sste, *ret = NULL;
unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
unsigned int entry;
unsigned int hash;
if (slb->vsid & SLB_VSID_B_1T)
hash = (slb->esid >> SID_SHIFT_1T) & mask;
else /* 256M */
hash = (slb->esid >> SID_SHIFT) & mask;
primary = ctx->sstp + (hash << 3);
for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
ret = sste;
if (sste_matches(sste, slb))
return NULL;
} }
if (ret)
return ret;
/* Nothing free, select an entry to cast out */ /* Nothing free, select an entry to cast out */
if (sec_hash && (*lru & 0x8)) ret = primary + ctx->sst_lru;
sste = secondary_group + (*lru & 0x7); ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
else
sste = primary_group + (*lru & 0x7);
*lru = (*lru + 1) & 0xf;
return sste; return ret;
} }
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{ {
/* mask is the group index, we search primary and secondary here. */ /* mask is the group index, we search primary and secondary here. */
unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
bool sec_hash = 1;
struct cxl_sste *sste; struct cxl_sste *sste;
unsigned int hash;
unsigned long flags; unsigned long flags;
sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
if (slb->vsid & SLB_VSID_B_1T)
hash = (slb->esid >> SID_SHIFT_1T) & mask;
else /* 256M */
hash = (slb->esid >> SID_SHIFT) & mask;
spin_lock_irqsave(&ctx->sste_lock, flags); spin_lock_irqsave(&ctx->sste_lock, flags);
sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash, sste = find_free_sste(ctx, slb);
ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru); if (!sste)
goto out_unlock;
pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
sste - ctx->sstp, slb->vsid, slb->esid); sste - ctx->sstp, slb->vsid, slb->esid);
sste->vsid_data = cpu_to_be64(slb->vsid); sste->vsid_data = cpu_to_be64(slb->vsid);
sste->esid_data = cpu_to_be64(slb->esid); sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
spin_unlock_irqrestore(&ctx->sste_lock, flags); spin_unlock_irqrestore(&ctx->sste_lock, flags);
} }
......
...@@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ...@@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->haurp = 0; /* disable */ ctx->elem->haurp = 0; /* disable */
ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
sr = CXL_PSL_SR_An_SC; sr = 0;
if (ctx->master) if (ctx->master)
sr |= CXL_PSL_SR_An_MP; sr |= CXL_PSL_SR_An_MP;
if (mfspr(SPRN_LPCR) & LPCR_TC) if (mfspr(SPRN_LPCR) & LPCR_TC)
...@@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) ...@@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
u64 sr; u64 sr;
int rc; int rc;
sr = CXL_PSL_SR_An_SC; sr = 0;
set_endian(sr); set_endian(sr);
if (ctx->master) if (ctx->master)
sr |= CXL_PSL_SR_An_MP; sr |= CXL_PSL_SR_An_MP;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment