Commit ab159ac5 authored by Linus Torvalds

Merge tag 'powerpc-5.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates and fixes from Michael Ellerman:
 "A bit of a mixture of things, tying up some loose ends.

  There's the removal of the nvlink code, which depended on a commit in
  the vfio tree. Then the enablement of huge vmalloc, which was in next
  for a few weeks but got dropped due to conflicts. And there are also a
  few fixes.

  Summary:

   - Remove the nvlink support now that its only user has been removed.

   - Enable huge vmalloc mappings for Radix MMU (P9).

   - Fix KVM conversion to gfn-based MMU notifier callbacks.

   - Fix a kexec/kdump crash with hot plugged CPUs.

   - Fix boot failure on 32-bit with CONFIG_STACKPROTECTOR.

   - Restore alphabetic order of the selects under CONFIG_PPC.

  Thanks to: Christophe Leroy, Christoph Hellwig, Nicholas Piggin,
  Sandipan Das, and Sourabh Jain"

* tag 'powerpc-5.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  KVM: PPC: Book3S HV: Fix conversion to gfn-based MMU notifier callbacks
  powerpc/kconfig: Restore alphabetic order of the selects under CONFIG_PPC
  powerpc/32: Fix boot failure with CONFIG_STACKPROTECTOR
  powerpc/powernv/memtrace: Fix dcache flushing
  powerpc/kexec_file: Use current CPU info while setting up FDT
  powerpc/64s/radix: Enable huge vmalloc mappings
  powerpc/powernv: remove the nvlink support
parents fc858a52 f96271ce
@@ -3282,6 +3282,8 @@

 	nohugeiomap	[KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.

+	nohugevmalloc	[PPC] Disable kernel huge vmalloc mappings.
+
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.

...
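A usage note (not part of the patch): like nohugeiomap above, the new switch is a plain boot parameter, so huge vmalloc mappings can be disabled from the bootloader on an affected machine, e.g.:

	linux /boot/vmlinux root=/dev/sda2 ro nohugevmalloc

The kernel image path and root device here are illustrative only.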
@@ -120,28 +120,29 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_ENABLE_MEMORY_HOTPLUG
 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+	select ARCH_HAS_COPY_MC if PPC64
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_KCOV
 	select ARCH_HAS_HUGEPD if HUGETLB_PAGE
+	select ARCH_HAS_KCOV
+	select ARCH_HAS_MEMBARRIER_CALLBACKS
+	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 	select ARCH_HAS_MMIOWB if PPC64
+	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_PMEM_API
-	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
 	select ARCH_HAS_PTE_SPECIAL
-	select ARCH_HAS_MEMBARRIER_CALLBACKS
-	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
 	select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
-	select ARCH_HAS_COPY_MC if PPC64
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_KEEP_MEMBLOCK
@@ -164,9 +165,8 @@ config PPC
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS
 	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+	select DMA_OPS if PPC64
 	select DMA_OPS_BYPASS if PPC64
-	select ARCH_HAS_DMA_MAP_DIRECT if PPC64 && PPC_PSERIES
-	select DMA_OPS if PPC64
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select EDAC_ATOMIC_SCRUB
 	select EDAC_SUPPORT
@@ -186,23 +186,22 @@ config PPC
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_VDSO_TIME_NS
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
 	select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
-	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KFENCE if PPC32
+	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_NVRAM_OPS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ASM_MODVERSIONS
-	select HAVE_C_RECORDMCOUNT
-	select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
-	select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
 	select HAVE_CONTEXT_TRACKING if PPC64
+	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DYNAMIC_FTRACE
@@ -216,10 +215,13 @@ config PPC
 	select HAVE_FUNCTION_TRACER
 	select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200	# plugin support on gcc <= 5.1 is buggy on PPC
 	select HAVE_GENERIC_VDSO
+	select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
+	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
 	select HAVE_KERNEL_LZO if DEFAULT_UIMAGE
@@ -231,26 +233,25 @@ config PPC
 	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
-	select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
 	select HAVE_OPTPROBES
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI if PPC64
-	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
-	select MMU_GATHER_RCU_TABLE_FREE
-	select MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
+	select HAVE_RSEQ
 	select HAVE_SOFTIRQ_ON_OWN_STACK
+	select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
+	select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select HAVE_IRQ_TIME_ACCOUNTING
-	select HAVE_RSEQ
+	select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
 	select IOMMU_HELPER if PPC64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
+	select MMU_GATHER_PAGE_SIZE
+	select MMU_GATHER_RCU_TABLE_FREE
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
 	select NEED_SG_DMA_LENGTH

...
@@ -210,7 +210,7 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
 				      unsigned int lpid);
 extern int kvmppc_radix_init(void);
 extern void kvmppc_radix_exit(void);
-extern bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			unsigned long gfn);
 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			unsigned long gfn);

...
@@ -28,9 +28,6 @@ extern struct device_node *opal_node;

 /* API functions */
 int64_t opal_invalid_call(void);
-int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf);
-int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr,
-			uint64_t bdf);
 int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
 			uint64_t lpcr);
 int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn,

...
@@ -126,7 +126,6 @@ struct pci_controller {
 #endif	/* CONFIG_PPC64 */

 	void *private_data;
-	struct npu *npu;
 };

 /* These are used for config access before all the PCI probing

...
@@ -119,11 +119,4 @@ extern void pcibios_scan_phb(struct pci_controller *hose);

 #endif	/* __KERNEL__ */

-extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
-extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
-extern int pnv_npu2_init(struct pci_controller *hose);
-extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
-		unsigned long msr);
-extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev);
-
 #endif /* __ASM_POWERPC_PCI_H */
@@ -8,6 +8,7 @@
 #include <linux/moduleloader.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/bug.h>
 #include <asm/module.h>
 #include <linux/uaccess.h>
@@ -88,17 +89,22 @@ int module_finalize(const Elf_Ehdr *hdr,
 	return 0;
 }

-#ifdef MODULES_VADDR
 static __always_inline void *
 __module_alloc(unsigned long size, unsigned long start, unsigned long end)
 {
-	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL,
-				    PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
-				    __builtin_return_address(0));
+	/*
+	 * Don't do huge page allocations for modules yet until more testing
+	 * is done. STRICT_MODULE_RWX may require extra work to support this
+	 * too.
+	 */
+	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+				    VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
+				    NUMA_NO_NODE, __builtin_return_address(0));
 }

 void *module_alloc(unsigned long size)
 {
+#ifdef MODULES_VADDR
 	unsigned long limit = (unsigned long)_etext - SZ_32M;
 	void *ptr = NULL;

@@ -112,5 +118,7 @@ void *module_alloc(unsigned long size)
 		ptr = __module_alloc(size, MODULES_VADDR, MODULES_END);

 	return ptr;
-}
+#else
+	return __module_alloc(size, VMALLOC_START, VMALLOC_END);
 #endif
+}
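For context, a minimal sketch of the opt-out this hunk uses, assuming CONFIG_HAVE_ARCH_HUGE_VMALLOC is enabled: once vmalloc may hand back huge-page-backed memory, a caller that needs page-granular permissions (as modules do under STRICT_MODULE_RWX) passes VM_NO_HUGE_VMAP. The wrapper name below is hypothetical; the allocator call mirrors the one in the hunk:

	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Allocate a buffer that must stay mapped with base pages, e.g.
	 * because its permissions will later change page by page. */
	static void *alloc_page_mapped(unsigned long size)
	{
		return __vmalloc_node_range(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
					    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
					    NUMA_NO_NODE, __builtin_return_address(0));
	}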
@@ -950,6 +950,93 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
 	return (unsigned int)(usm_entries * sizeof(u64));
 }

+/**
+ * add_node_props - Reads node properties from device node structure and add
+ *                  them to fdt.
+ * @fdt:            Flattened device tree of the kernel
+ * @node_offset:    offset of the node to add a property at
+ * @dn:             device node pointer
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+{
+	int ret = 0;
+	struct property *pp;
+
+	if (!dn)
+		return -EINVAL;
+
+	for_each_property_of_node(dn, pp) {
+		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+		if (ret < 0) {
+			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/**
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
+ *                    device node.
+ * @fdt:              Flattened device tree of the kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int update_cpus_node(void *fdt)
+{
+	struct device_node *cpus_node, *dn;
+	int cpus_offset, cpus_subnode_offset, ret = 0;
+
+	cpus_offset = fdt_path_offset(fdt, "/cpus");
+	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+		pr_err("Malformed device tree: error reading /cpus node: %s\n",
+		       fdt_strerror(cpus_offset));
+		return cpus_offset;
+	}
+
+	if (cpus_offset > 0) {
+		ret = fdt_del_node(fdt, cpus_offset);
+		if (ret < 0) {
+			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
+			return -EINVAL;
+		}
+	}
+
+	/* Add cpus node to fdt */
+	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
+	if (cpus_offset < 0) {
+		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
+		return -EINVAL;
+	}
+
+	/* Add cpus node properties */
+	cpus_node = of_find_node_by_path("/cpus");
+	ret = add_node_props(fdt, cpus_offset, cpus_node);
+	of_node_put(cpus_node);
+	if (ret < 0)
+		return ret;
+
+	/* Loop through all subnodes of cpus and add them to fdt */
+	for_each_node_by_type(dn, "cpu") {
+		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+		if (cpus_subnode_offset < 0) {
+			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+			       fdt_strerror(cpus_subnode_offset));
+			ret = cpus_subnode_offset;
+			goto out;
+		}
+
+		ret = add_node_props(fdt, cpus_subnode_offset, dn);
+		if (ret < 0)
+			goto out;
+	}
+out:
+	of_node_put(dn);
+	return ret;
+}
+
 /**
  * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
  *                       being loaded.
@@ -1006,6 +1093,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
 		}
 	}

+	/* Update cpus nodes information to account hotplug CPUs. */
+	ret = update_cpus_node(fdt);
+	if (ret < 0)
+		goto out;
+
 	/* Update memory reserve map */
 	ret = get_reserved_memory_ranges(&rmem);
 	if (ret)

...
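A side note on update_cpus_node() above: the for_each_node_by_type() iterator takes a reference on each node it hands out and drops it when advancing, so the goto out path leaves a reference held on the current node, which the of_node_put(dn) at the label releases (after normal loop termination dn is NULL and of_node_put(NULL) is a no-op). A minimal sketch of that early-exit pattern, with a hypothetical error condition:

	struct device_node *dn;

	for_each_node_by_type(dn, "cpu") {
		if (something_failed) {		/* hypothetical check */
			of_node_put(dn);	/* drop the iterator's reference */
			return -EIO;
		}
	}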
@@ -795,7 +795,7 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
 	}
 }

-static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			    unsigned long gfn)
 {
 	unsigned long i;
@@ -829,15 +829,21 @@ static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		unlock_rmap(rmapp);
 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
-	return false;
 }

 bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		return kvm_unmap_radix(kvm, range->slot, range->start);
+	gfn_t gfn;
+
+	if (kvm_is_radix(kvm)) {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			kvm_unmap_radix(kvm, range->slot, gfn);
+	} else {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			kvm_unmap_rmapp(kvm, range->slot, gfn);
+	}

-	return kvm_unmap_rmapp(kvm, range->slot, range->start);
+	return false;
 }

 void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
@@ -924,10 +930,18 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,

 bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		kvm_age_radix(kvm, range->slot, range->start);
+	gfn_t gfn;
+	bool ret = false;

-	return kvm_age_rmapp(kvm, range->slot, range->start);
+	if (kvm_is_radix(kvm)) {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			ret |= kvm_age_radix(kvm, range->slot, gfn);
+	} else {
+		for (gfn = range->start; gfn < range->end; gfn++)
+			ret |= kvm_age_rmapp(kvm, range->slot, gfn);
+	}
+
+	return ret;
 }

 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -965,18 +979,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,

 bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	if (kvm_is_radix(kvm))
-		kvm_test_age_radix(kvm, range->slot, range->start);
+	WARN_ON(range->start + 1 != range->end);

-	return kvm_test_age_rmapp(kvm, range->slot, range->start);
+	if (kvm_is_radix(kvm))
+		return kvm_test_age_radix(kvm, range->slot, range->start);
+	else
+		return kvm_test_age_rmapp(kvm, range->slot, range->start);
 }

 bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 {
+	WARN_ON(range->start + 1 != range->end);
+
 	if (kvm_is_radix(kvm))
-		return kvm_unmap_radix(kvm, range->slot, range->start);
+		kvm_unmap_radix(kvm, range->slot, range->start);
+	else
+		kvm_unmap_rmapp(kvm, range->slot, range->start);

-	return kvm_unmap_rmapp(kvm, range->slot, range->start);
+	return false;
 }

 static int vcpus_running(struct kvm *kvm)

...
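For readers following the conversion: the gfn-based notifier hands each hook a struct kvm_gfn_range covering the half-open span [range->start, range->end) within one memslot, and the bool return tells the generic code whether a TLB flush is needed; hence the per-gfn loops above and the WARN_ON in the hooks that are only ever invoked for a single page. A minimal sketch of a handler under that contract (the per-gfn helper is hypothetical):

	static bool example_gfn_range_handler(struct kvm *kvm, struct kvm_gfn_range *range)
	{
		gfn_t gfn;
		bool flush = false;

		for (gfn = range->start; gfn < range->end; gfn++)
			flush |= handle_one_gfn(kvm, range->slot, gfn);	/* hypothetical */

		return flush;	/* true => caller flushes TLBs */
	}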
@@ -993,7 +993,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 }

 /* Called with kvm->mmu_lock held */
-bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		     unsigned long gfn)
 {
 	pte_t *ptep;
@@ -1002,14 +1002,13 @@ void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,

 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
 		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
-		return false;
+		return;
 	}

 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
 				 kvm->arch.lpid);
-	return false;
 }

 /* Called with kvm->mmu_lock held */

...
@@ -5,6 +5,9 @@

 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

+CFLAGS_code-patching.o += -fno-stack-protector
+CFLAGS_feature-fixups.o += -fno-stack-protector
+
 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)

...
@@ -10,7 +10,7 @@ obj-$(CONFIG_SMP)	+= smp.o subcore.o subcore-asm.o
 obj-$(CONFIG_FA_DUMP)	+= opal-fadump.o
 obj-$(CONFIG_PRESERVE_FA_DUMP)	+= opal-fadump.o
 obj-$(CONFIG_OPAL_CORE)	+= opal-core.o
-obj-$(CONFIG_PCI)	+= pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
+obj-$(CONFIG_PCI)	+= pci.o pci-ioda.o pci-ioda-tce.o
 obj-$(CONFIG_PCI_IOV)	+= pci-sriov.o
 obj-$(CONFIG_CXL_BASE)	+= pci-cxl.o
 obj-$(CONFIG_EEH)	+= eeh-powernv.o

...
@@ -104,8 +104,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
 	 * Before we go ahead and use this range as cache inhibited range
 	 * flush the cache.
 	 */
-	flush_dcache_range_chunked(PFN_PHYS(start_pfn),
-				   PFN_PHYS(start_pfn + nr_pages),
+	flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
+				   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
 				   FLUSH_CHUNK_SIZE);
 }

...
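The two changed lines above fix an address-space mix-up: flush_dcache_range_chunked() iterates dcbf over kernel virtual addresses, while PFN_PHYS() yields a physical address. A minimal sketch of the distinction, assuming the pfn range lies in the kernel linear mapping:

	unsigned long start = (unsigned long)pfn_to_kaddr(start_pfn);	/* virtual, flushable */
	unsigned long end   = (unsigned long)pfn_to_kaddr(start_pfn + nr_pages);
	phys_addr_t pa      = PFN_PHYS(start_pfn);	/* physical - not what the flush walks */

	flush_dcache_range(start, end);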
@@ -267,8 +267,6 @@ OPAL_CALL(opal_xive_get_queue_state,	OPAL_XIVE_GET_QUEUE_STATE);
 OPAL_CALL(opal_xive_set_queue_state,	OPAL_XIVE_SET_QUEUE_STATE);
 OPAL_CALL(opal_xive_get_vp_state,	OPAL_XIVE_GET_VP_STATE);
 OPAL_CALL(opal_signal_system_reset,	OPAL_SIGNAL_SYSTEM_RESET);
-OPAL_CALL(opal_npu_init_context,	OPAL_NPU_INIT_CONTEXT);
-OPAL_CALL(opal_npu_destroy_context,	OPAL_NPU_DESTROY_CONTEXT);
 OPAL_CALL(opal_npu_map_lpar,		OPAL_NPU_MAP_LPAR);
 OPAL_CALL(opal_imc_counters_init,	OPAL_IMC_COUNTERS_INIT);
 OPAL_CALL(opal_imc_counters_start,	OPAL_IMC_COUNTERS_START);

...
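For orientation, each OPAL firmware entry point removed here was wired up in two places, both visible in this series: a C prototype in asm/opal.h (earlier hunk) and a calling stub generated by the OPAL_CALL() macro in this file. A sketch of that pairing for a hypothetical call and token:

	int64_t opal_example(uint64_t arg);		/* asm/opal.h */
	OPAL_CALL(opal_example, OPAL_EXAMPLE);		/* opal-call.c */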
@@ -47,8 +47,7 @@
 #define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR */
 #define PNV_IODA1_DMA32_SEGSIZE	0x10000000

-static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
-					      "NPU_OCAPI" };
+static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };

 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
 static void pnv_pci_configure_bus(struct pci_bus *bus);
@@ -192,8 +191,6 @@ void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
 	unsigned int pe_num = pe->pe_number;

 	WARN_ON(pe->pdev);
-	WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
-	kfree(pe->npucomp);

 	memset(pe, 0, sizeof(struct pnv_ioda_pe));
 	mutex_lock(&phb->ioda.pe_alloc_mutex);
@@ -875,7 +872,7 @@ int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 	 * Release from all parents PELT-V. NPUs don't have a PELTV
 	 * table
 	 */
-	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+	if (phb->type != PNV_PHB_NPU_OCAPI)
 		pnv_ioda_unset_peltv(phb, pe, parent);

 	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
@@ -946,7 +943,7 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 	 * Configure PELTV. NPUs don't have a PELTV table so skip
 	 * configuration on them.
 	 */
-	if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+	if (phb->type != PNV_PHB_NPU_OCAPI)
 		pnv_ioda_set_peltv(phb, pe, true);

 	/* Setup reverse map */
@@ -1002,8 +999,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)

 	/* NOTE: We don't get a reference for the pointer in the PE
 	 * data structure, both the device and PE structures should be
-	 * destroyed at the same time. However, removing nvlink
-	 * devices will need some work.
+	 * destroyed at the same time.
 	 *
 	 * At some point we want to remove the PDN completely anyways
	 */
@@ -1099,113 +1095,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 	return pe;
 }

-static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
-{
-	int pe_num, found_pe = false, rc;
-	long rid;
-	struct pnv_ioda_pe *pe;
-	struct pci_dev *gpu_pdev;
-	struct pci_dn *npu_pdn;
-	struct pnv_phb *phb = pci_bus_to_pnvhb(npu_pdev->bus);
-
-	/*
-	 * Intentionally leak a reference on the npu device (for
-	 * nvlink only; this is not an opencapi path) to make sure it
-	 * never goes away, as it's been the case all along and some
-	 * work is needed otherwise.
-	 */
-	pci_dev_get(npu_pdev);
-
-	/*
-	 * Due to a hardware errata PE#0 on the NPU is reserved for
-	 * error handling. This means we only have three PEs remaining
-	 * which need to be assigned to four links, implying some
-	 * links must share PEs.
-	 *
-	 * To achieve this we assign PEs such that NPUs linking the
-	 * same GPU get assigned the same PE.
-	 */
-	gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
-	for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
-		pe = &phb->ioda.pe_array[pe_num];
-		if (!pe->pdev)
-			continue;
-
-		if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
-			/*
-			 * This device has the same peer GPU so should
-			 * be assigned the same PE as the existing
-			 * peer NPU.
-			 */
-			dev_info(&npu_pdev->dev,
-				 "Associating to existing PE %x\n", pe_num);
-			npu_pdn = pci_get_pdn(npu_pdev);
-			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
-			npu_pdn->pe_number = pe_num;
-			phb->ioda.pe_rmap[rid] = pe->pe_number;
-			pe->device_count++;
-
-			/* Map the PE to this link */
-			rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
-					OpalPciBusAll,
-					OPAL_COMPARE_RID_DEVICE_NUMBER,
-					OPAL_COMPARE_RID_FUNCTION_NUMBER,
-					OPAL_MAP_PE);
-			WARN_ON(rc != OPAL_SUCCESS);
-			found_pe = true;
-			break;
-		}
-	}
-
-	if (!found_pe)
-		/*
-		 * Could not find an existing PE so allocate a new
-		 * one.
-		 */
-		return pnv_ioda_setup_dev_PE(npu_pdev);
-	else
-		return pe;
-}
-
-static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
-{
-	struct pci_dev *pdev;
-
-	list_for_each_entry(pdev, &bus->devices, bus_list)
-		pnv_ioda_setup_npu_PE(pdev);
-}
-
-static void pnv_pci_ioda_setup_nvlink(void)
-{
-	struct pci_controller *hose;
-	struct pnv_phb *phb;
-	struct pnv_ioda_pe *pe;
-
-	list_for_each_entry(hose, &hose_list, list_node) {
-		phb = hose->private_data;
-		if (phb->type == PNV_PHB_NPU_NVLINK) {
-			/* PE#0 is needed for error reporting */
-			pnv_ioda_reserve_pe(phb, 0);
-			pnv_ioda_setup_npu_PEs(hose->bus);
-			if (phb->model == PNV_PHB_MODEL_NPU2)
-				WARN_ON_ONCE(pnv_npu2_init(hose));
-		}
-	}
-
-	list_for_each_entry(hose, &hose_list, list_node) {
-		phb = hose->private_data;
-
-		if (phb->type != PNV_PHB_IODA2)
-			continue;
-
-		list_for_each_entry(pe, &phb->ioda.pe_list, list)
-			pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
-	}
-
-#ifdef CONFIG_IOMMU_API
-	/* setup iommu groups so we can do nvlink pass-thru */
-	pnv_pci_npu_setup_iommu_groups();
-#endif
-}
-
 static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 				       struct pnv_ioda_pe *pe);
@@ -1468,18 +1357,6 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
 #define PHB3_TCE_KILL_INVAL_PE		PPC_BIT(1)
 #define PHB3_TCE_KILL_INVAL_ONE	PPC_BIT(2)

-static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
-	__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
-	const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
-
-	mb(); /* Ensure previous TCE table stores are visible */
-	if (rm)
-		__raw_rm_writeq_be(val, invalidate);
-	else
-		__raw_writeq_be(val, invalidate);
-}
-
 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
 {
 	/* 01xb - invalidate TCEs that match the specified PE# */
@@ -1539,20 +1416,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
 		struct pnv_phb *phb = pe->phb;
 		unsigned int shift = tbl->it_page_shift;

-		/*
-		 * NVLink1 can use the TCE kill register directly as
-		 * it's the same as PHB3. NVLink2 is different and
-		 * should go via the OPAL call.
-		 */
-		if (phb->model == PNV_PHB_MODEL_NPU) {
-			/*
-			 * The NVLink hardware does not support TCE kill
-			 * per TCE entry so we have to invalidate
-			 * the entire cache for it.
-			 */
-			pnv_pci_phb3_tce_invalidate_entire(phb, rm);
-			continue;
-		}
 		if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
 			pnv_pci_phb3_tce_invalidate(pe, rm, shift,
 						    index, npages);
@@ -1564,14 +1427,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
 	}
 }

-void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
-	if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
-		pnv_pci_phb3_tce_invalidate_entire(phb, rm);
-	else
-		opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
-}
-
 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
 		long npages, unsigned long uaddr,
 		enum dma_data_direction direction,
@@ -2451,7 +2306,6 @@ static void pnv_pci_enable_bridges(void)

 static void pnv_pci_ioda_fixup(void)
 {
-	pnv_pci_ioda_setup_nvlink();
 	pnv_pci_ioda_create_dbgfs();

 	pnv_pci_enable_bridges();
@@ -2824,15 +2678,6 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
 	pnv_ioda_release_pe(pe);
 }

-static void pnv_npu_disable_device(struct pci_dev *pdev)
-{
-	struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
-	struct eeh_pe *eehpe = edev ? edev->pe : NULL;
-
-	if (eehpe && eeh_ops && eeh_ops->reset)
-		eeh_ops->reset(eehpe, EEH_RESET_HOT);
-}
-
 static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 {
 	struct pnv_phb *phb = hose->private_data;
@@ -2874,16 +2719,6 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
 	.shutdown		= pnv_pci_ioda_shutdown,
 };

-static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
-	.setup_msi_irqs		= pnv_setup_msi_irqs,
-	.teardown_msi_irqs	= pnv_teardown_msi_irqs,
-	.enable_device_hook	= pnv_pci_enable_device_hook,
-	.window_alignment	= pnv_pci_window_alignment,
-	.reset_secondary_bus	= pnv_pci_reset_secondary_bus,
-	.shutdown		= pnv_pci_ioda_shutdown,
-	.disable_device		= pnv_npu_disable_device,
-};
-
 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
 	.enable_device_hook	= pnv_ocapi_enable_device_hook,
 	.release_device		= pnv_pci_release_device,
@@ -2957,10 +2792,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		phb->model = PNV_PHB_MODEL_P7IOC;
 	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
 		phb->model = PNV_PHB_MODEL_PHB3;
-	else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
-		phb->model = PNV_PHB_MODEL_NPU;
-	else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
-		phb->model = PNV_PHB_MODEL_NPU2;
 	else
 		phb->model = PNV_PHB_MODEL_UNKNOWN;
@@ -3118,9 +2949,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;

 	switch (phb->type) {
-	case PNV_PHB_NPU_NVLINK:
-		hose->controller_ops = pnv_npu_ioda_controller_ops;
-		break;
 	case PNV_PHB_NPU_OCAPI:
 		hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
 		break;
@@ -3173,11 +3001,6 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
 }

-void __init pnv_pci_init_npu_phb(struct device_node *np)
-{
-	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
-}
-
 void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
 {
 	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);

...
@@ -926,17 +926,6 @@ void __init pnv_pci_init(void)
 	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
 		pnv_pci_init_ioda2_phb(np);

-	/* Look for NPU PHBs */
-	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
-		pnv_pci_init_npu_phb(np);
-
-	/*
-	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
-	 * the exception of TCE kill which requires an OPAL call.
-	 */
-	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
-		pnv_pci_init_npu_phb(np);
-
 	/* Look for NPU2 OpenCAPI PHBs */
 	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
 		pnv_pci_init_npu2_opencapi_phb(np);

...
@@ -10,10 +10,9 @@
 struct pci_dn;

 enum pnv_phb_type {
-	PNV_PHB_IODA1		= 0,
-	PNV_PHB_IODA2		= 1,
-	PNV_PHB_NPU_NVLINK	= 2,
-	PNV_PHB_NPU_OCAPI	= 3,
+	PNV_PHB_IODA1,
+	PNV_PHB_IODA2,
+	PNV_PHB_NPU_OCAPI,
 };

 /* Precise PHB model for error management */
@@ -21,8 +20,6 @@ enum pnv_phb_model {
 	PNV_PHB_MODEL_UNKNOWN,
 	PNV_PHB_MODEL_P7IOC,
 	PNV_PHB_MODEL_PHB3,
-	PNV_PHB_MODEL_NPU,
-	PNV_PHB_MODEL_NPU2,
 };

 #define PNV_PCI_DIAG_BUF_SIZE	8192
@@ -81,7 +78,6 @@ struct pnv_ioda_pe {
 	/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
 	struct iommu_table_group table_group;
-	struct npu_comp		*npucomp;

 	/* 64-bit TCE bypass region */
 	bool			tce_bypass_enabled;
@@ -289,9 +285,7 @@ extern struct iommu_table *pnv_pci_table_alloc(int nid);
 extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
-extern void pnv_pci_init_npu_phb(struct device_node *np);
 extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
-extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
@@ -314,11 +308,6 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
 #define pe_info(pe, fmt, ...) \
 	pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)

-/* Nvlink functions */
-extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
-extern void pnv_pci_npu_setup_iommu_groups(void);
-
 /* pci-ioda-tce.c */
 #define POWERNV_IOMMU_DEFAULT_LEVELS	2
 #define POWERNV_IOMMU_MAX_LEVELS	5

...
@@ -224,8 +224,6 @@ static void __init pSeries_request_regions(void)

 void __init pSeries_final_fixup(void)
 {
-	struct pci_controller *hose;
-
 	pSeries_request_regions();

 	eeh_show_enabled();
@@ -234,27 +232,6 @@ void __init pSeries_final_fixup(void)
 	ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
 	ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
 #endif
-
-	list_for_each_entry(hose, &hose_list, list_node) {
-		struct device_node *dn = hose->dn, *nvdn;
-
-		while (1) {
-			dn = of_find_all_nodes(dn);
-			if (!dn)
-				break;
-			nvdn = of_parse_phandle(dn, "ibm,nvlink", 0);
-			if (!nvdn)
-				continue;
-			if (!of_device_is_compatible(nvdn, "ibm,npu-link"))
-				continue;
-			if (!of_device_is_compatible(nvdn->parent,
-						"ibm,power9-npu"))
-				continue;
-#ifdef CONFIG_PPC_POWERNV
-			WARN_ON_ONCE(pnv_npu2_init(hose));
-#endif
-			break;
-		}
-	}
 }

 /*

...