Commit 6ff4d3e9 authored by Benjamin Herrenschmidt, committed by Michael Ellerman

powerpc: Remove old unused icswx based coprocessor support

We have a whole pile of unused code to maintain the ACOP register,
allocate coprocessor PIDs and handle ACOP faults. This mechanism
was used for the HFI adapter on POWER7 which is dead and gone and
whose driver never went upstream. It was used on some A2 core based
stuff that also never saw the light of day.

Take out all that code.

There is still some POWER8 coprocessor code that uses icswx but it's
kernel only and thus doesn't use any of that infrastructure.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8f5ca0b3
@@ -96,11 +96,6 @@ typedef struct {
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
struct spinlock *cop_lockp; /* guard acop and cop_pid */
unsigned long acop; /* mask of enabled coprocessor types */
unsigned int cop_pid; /* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
/* for 4K PTE fragment support */
void *pte_frag;
......
@@ -96,12 +96,6 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
if (prev == next)
return;
#ifdef CONFIG_PPC_ICSWX
/* Switch coprocessor context only if prev or next uses a coprocessor */
if (prev->context.acop || next->context.acop)
switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */
/* We must stop all altivec streams before changing the HW
* context
*/
......
@@ -221,10 +221,7 @@
#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */
#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
#endif
#ifdef CONFIG_PPC_ICSWX
#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */
#endif
/* Bit definitions for CCR1. */
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
......
@@ -22,8 +22,6 @@ ifeq ($(CONFIG_PPC_STD_MMU_64),y)
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
endif
obj-$(CONFIG_PPC_ICSWX) += icswx.o
obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_8xx) += 8xx_mmu.o
......
@@ -45,8 +45,6 @@
#include <asm/siginfo.h>
#include <asm/debug.h>
#include "icswx.h"
static inline bool notify_page_fault(struct pt_regs *regs)
{
bool ret = false;
@@ -389,19 +387,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
int fault, major = 0;
bool store_update_sp = false;
#ifdef CONFIG_PPC_ICSWX
/*
* we need to do this early because this "data storage
* interrupt" does not update the DAR/DEAR so we don't want to
* look at it
*/
if (error_code & ICSWX_DSI_UCT) {
int rc = acop_handle_fault(regs, address, error_code);
if (rc)
return rc;
}
#endif /* CONFIG_PPC_ICSWX */
if (notify_page_fault(regs))
return 0;
......
/*
* ICSWX and ACOP Management
*
* Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "icswx.h"
/*
* The processor and its L2 cache cause the icswx instruction to
* generate a COP_REQ transaction on PowerBus. The transaction has no
* address, and the processor does not perform an MMU access to
* authenticate the transaction. The command portion of the PowerBus
* COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
* Process ID (PID), which the coprocessor compares to the authorized
* LPID and PID held in the coprocessor, to determine if the process
* is authorized to generate the transaction. The data of the COP_REQ
* transaction is 128 bytes or less in size and is placed in cacheable
* memory on a 128-byte cache line boundary.
*
* A task that wants to use a coprocessor calls use_cop() to mark the
* use of the Coprocessor Type (CT) and enable context switching. On a
* server class processor, the PID register is used only for coprocessor
* management and so a coprocessor PID is allocated before executing the
* icswx instruction. drop_cop() is used to free the coprocessor PID.
*
* Example:
* Host Fabric Interface (HFI) is a PowerPC network coprocessor.
* Each HFI has multiple windows. Each HFI window serves as a
* network device sending to and receiving from the HFI network.
* The HFI immediate send function uses the icswx instruction. The
* immediate send function allows small (single cache-line) packets to
* be sent without using the regular HFI send FIFO and doorbell, which are
* much slower than immediate send.
*
* For each task intending to use HFI immediate send, the HFI driver
* calls use_cop() to obtain a coprocessor PID for the task.
* The HFI driver then allocates a free HFI window and saves the
* coprocessor PID to the HFI window to allow the task to use the
* HFI window.
*
* The HFI driver repeatedly creates immediate send packets and
* issues icswx instruction to send data through the HFI window.
* The HFI compares the coprocessor PID in the CPU PID register
* to the PID held in the HFI window to determine if the transaction
* is allowed.
*
* When the task wants to release the HFI window, the HFI driver calls
* drop_cop() to release the coprocessor PID.
*/
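/*
 * Illustrative sketch (editor's addition, not part of the removed file):
 * how an out-of-tree driver such as HFI might have used this API. The
 * structure, function names and CT value below are hypothetical.
 */
#define EXAMPLE_CT	2	/* hypothetical Coprocessor Type */

struct example_cop_window {
	unsigned int authorized_pid;	/* PID the coprocessor will accept */
};

static int example_window_open(struct example_cop_window *win)
{
	int cop_pid;

	/* Enable the CT in this mm's ACOP mask and get a coprocessor PID */
	cop_pid = use_cop(1ULL << (63 - EXAMPLE_CT), current->mm);
	if (cop_pid < 0)
		return cop_pid;

	/* Program the window to accept icswx transactions from this PID */
	win->authorized_pid = cop_pid;
	return 0;
}

static void example_window_close(struct example_cop_window *win)
{
	/* Disable the CT; the coprocessor PID is freed once no CTs remain */
	drop_cop(1ULL << (63 - EXAMPLE_CT), current->mm);
	win->authorized_pid = 0;
}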
void switch_cop(struct mm_struct *next)
{
#ifdef CONFIG_PPC_ICSWX_PID
mtspr(SPRN_PID, next->context.cop_pid);
#endif
mtspr(SPRN_ACOP, next->context.acop);
}
/**
* Start using a coprocessor.
* @acop: mask of coprocessor to be used.
* @mm: The mm to associate the coprocessor with. Most likely the current mm.
*
* Return a positive PID if successful. Negative errno otherwise.
* The returned PID will be fed to the coprocessor to determine if an
* icswx transaction is authenticated.
*/
int use_cop(unsigned long acop, struct mm_struct *mm)
{
int ret;
if (!cpu_has_feature(CPU_FTR_ICSWX))
return -ENODEV;
if (!mm || !acop)
return -EINVAL;
/* The page_table_lock ensures mm_users won't change under us */
spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
ret = get_cop_pid(mm);
if (ret < 0)
goto out;
/* update acop */
mm->context.acop |= acop;
sync_cop(mm);
/*
* If this is a threaded process then there might be other threads
* running. We need to send an IPI to force them to pick up any
* change in PID and ACOP.
*/
if (atomic_read(&mm->mm_users) > 1)
smp_call_function(sync_cop, mm, 1);
out:
spin_unlock(mm->context.cop_lockp);
spin_unlock(&mm->page_table_lock);
return ret;
}
EXPORT_SYMBOL_GPL(use_cop);
/**
* Stop using a coprocessor.
* @acop: mask of coprocessor to be stopped.
* @mm: The mm the coprocessor is associated with.
*/
void drop_cop(unsigned long acop, struct mm_struct *mm)
{
int free_pid;
if (!cpu_has_feature(CPU_FTR_ICSWX))
return;
if (WARN_ON_ONCE(!mm))
return;
/* The page_table_lock ensures mm_users won't change under us */
spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
mm->context.acop &= ~acop;
free_pid = disable_cop_pid(mm);
sync_cop(mm);
/*
* If this is a threaded process then there might be other threads
* running. We need to send an IPI to force them to pick up any
* change in PID and ACOP.
*/
if (atomic_read(&mm->mm_users) > 1)
smp_call_function(sync_cop, mm, 1);
if (free_pid != COP_PID_NONE)
free_cop_pid(free_pid);
spin_unlock(mm->context.cop_lockp);
spin_unlock(&mm->page_table_lock);
}
EXPORT_SYMBOL_GPL(drop_cop);
static int acop_use_cop(int ct)
{
/* There is no alternate policy, yet */
return -1;
}
/*
* Get the instruction word at the NIP
*/
static u32 acop_get_inst(struct pt_regs *regs)
{
u32 inst;
u32 __user *p;
p = (u32 __user *)regs->nip;
if (!access_ok(VERIFY_READ, p, sizeof(*p)))
return 0;
if (__get_user(inst, p))
return 0;
return inst;
}
/**
* @regs: registers at time of interrupt
* @address: storage address
* @error_code: Fault code, usually the DSISR or ESR depending on
* processor type
*
* Return 0 if we are able to resolve the data storage fault that
* results from a CT miss in the ACOP register.
*/
int acop_handle_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
int ct;
u32 inst = 0;
if (!cpu_has_feature(CPU_FTR_ICSWX)) {
pr_info("No coprocessors available");
_exception(SIGILL, regs, ILL_ILLOPN, address);
}
if (!user_mode(regs)) {
/* this could happen if the HV denies the
* kernel access, for now we just die */
die("ICSWX from kernel failed", regs, SIGSEGV);
}
/* Some implementations leave us a hint for the CT */
ct = ICSWX_GET_CT_HINT(error_code);
if (ct < 0) {
/* we have to peek at the instruction word to figure out CT */
u32 ccw;
u32 rs;
inst = acop_get_inst(regs);
if (inst == 0)
return -1;
rs = (inst >> (31 - 10)) & 0x1f;
ccw = regs->gpr[rs];
ct = (ccw >> 16) & 0x3f;
}
/*
* We could be here because another thread has enabled acop
* but the ACOP register has yet to be updated.
*
* This should have been taken care of by the IPI to sync all
* the threads (see smp_call_function(sync_cop, mm, 1)), but
* that could take forever if there is a significant number
* of threads.
*
* Given the number of threads on some of these systems,
* perhaps this is the best way to sync ACOP rather than whack
* every thread with an IPI.
*/
if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
sync_cop(current->active_mm);
return 0;
}
/* check for alternate policy */
if (!acop_use_cop(ct))
return 0;
/* at this point the CT is unknown to the system */
pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
current->comm, current->pid, ct);
/* get inst if we don't already have it */
if (inst == 0) {
inst = acop_get_inst(regs);
if (inst == 0)
return -1;
}
/* Check if the instruction is the "record form" */
if (inst & 1) {
/*
* the instruction is "record" form so we can reject
* using CR0
*/
regs->ccr &= ~(0xful << 28);
regs->ccr |= ICSWX_RC_NOT_FOUND << 28;
/* Move on to the next instruction */
regs->nip += 4;
} else {
/*
* There is no architected mechanism to report a bad
* CT so we could either SIGILL or report nothing.
* Since the non-record version should only be used
* for "hints" or "don't care" we should probably do
* nothing. However, I could see how some people
* might want a SIGILL, so it is here if you want it.
*/
#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
_exception(SIGILL, regs, ILL_ILLOPN, address);
#else
regs->nip += 4;
#endif
}
return 0;
}
EXPORT_SYMBOL_GPL(acop_handle_fault);
#ifndef _ARCH_POWERPC_MM_ICSWX_H_
#define _ARCH_POWERPC_MM_ICSWX_H_
/*
* ICSWX and ACOP Management
*
* Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <asm/mmu_context.h>
/* also used to denote that PIDs are not used */
#define COP_PID_NONE 0
static inline void sync_cop(void *arg)
{
struct mm_struct *mm = arg;
if (mm == current->active_mm)
switch_cop(current->active_mm);
}
#ifdef CONFIG_PPC_ICSWX_PID
extern int get_cop_pid(struct mm_struct *mm);
extern int disable_cop_pid(struct mm_struct *mm);
extern void free_cop_pid(int free_pid);
#else
#define get_cop_pid(m) (COP_PID_NONE)
#define disable_cop_pid(m) (COP_PID_NONE)
#define free_cop_pid(p)
#endif
/*
* These are implementation bits for architected registers. If these
* ever become architected they should be moved to reg.h et al.
*/
/* UCT is the same bit for Server and Embedded */
#define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */
#ifdef CONFIG_PPC_BOOK3E
/* Embedded implementation gives us no hints as to what the CT is */
#define ICSWX_GET_CT_HINT(x) (-1)
#else
/* Server implementation contains the CT value in the DSISR */
#define ICSWX_DSISR_CTMASK 0x00003f00
#define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8)
#endif
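/*
 * Worked example (editor's addition): on a server processor, a DSISR of
 * 0x00004200 has ICSWX_DSI_UCT set and ICSWX_GET_CT_HINT() yields
 * (0x00004200 & 0x00003f00) >> 8 = 2, i.e. a hint of CT 2.
 */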
#define ICSWX_RC_STARTED 0x8 /* The request has been started */
#define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */
#define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */
#define ICSWX_RC_UNDEFINED 0x1 /* Reserved */
extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
static inline u64 acop_copro_type_bit(unsigned int type)
{
return 1ULL << (63 - type);
}
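/*
 * Example (editor's addition): ACOP uses IBM (big-endian) bit numbering,
 * so acop_copro_type_bit(2) == 1ULL << 61 == 0x2000000000000000ULL, the
 * bit acop_handle_fault() tests in context.acop for CT 2.
 */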
#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
/*
* ICSWX and ACOP/PID Management
*
* Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/module.h>
#include "icswx.h"
#define COP_PID_MIN (COP_PID_NONE + 1)
#define COP_PID_MAX (0xFFFF)
static DEFINE_SPINLOCK(mmu_context_acop_lock);
static DEFINE_IDA(cop_ida);
static int new_cop_pid(struct ida *ida, int min_id, int max_id,
spinlock_t *lock)
{
int index;
int err;
again:
if (!ida_pre_get(ida, GFP_KERNEL))
return -ENOMEM;
spin_lock(lock);
err = ida_get_new_above(ida, min_id, &index);
spin_unlock(lock);
if (err == -EAGAIN)
goto again;
else if (err)
return err;
if (index > max_id) {
spin_lock(lock);
ida_remove(ida, index);
spin_unlock(lock);
return -ENOMEM;
}
return index;
}
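/*
 * Editor's note: a sketch of the same allocation using the later IDA API
 * (ida_alloc_range()/ida_free()), which postdates this code; it replaces
 * the pre-get/retry loop and the caller-supplied lock.
 */
static inline int new_cop_pid_modern(struct ida *ida)
{
	/* Returns an id in [COP_PID_MIN, COP_PID_MAX] or a negative errno */
	return ida_alloc_range(ida, COP_PID_MIN, COP_PID_MAX, GFP_KERNEL);
}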
int get_cop_pid(struct mm_struct *mm)
{
int pid;
if (mm->context.cop_pid == COP_PID_NONE) {
pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
&mmu_context_acop_lock);
if (pid >= 0)
mm->context.cop_pid = pid;
}
return mm->context.cop_pid;
}
int disable_cop_pid(struct mm_struct *mm)
{
int free_pid = COP_PID_NONE;
if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
free_pid = mm->context.cop_pid;
mm->context.cop_pid = COP_PID_NONE;
}
return free_pid;
}
void free_cop_pid(int free_pid)
{
spin_lock(&mmu_context_acop_lock);
ida_remove(&cop_ida, free_pid);
spin_unlock(&mmu_context_acop_lock);
}
@@ -25,8 +25,6 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include "icswx.h"
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
@@ -164,16 +162,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return index;
mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
if (!mm->context.cop_lockp) {
__destroy_context(index);
subpage_prot_free(mm);
mm->context.id = MMU_NO_CONTEXT;
return -ENOMEM;
}
spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
mm->context.pte_frag = NULL;
@@ -225,12 +213,6 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_SPAPR_TCE_IOMMU
WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
#ifdef CONFIG_PPC_ICSWX
drop_cop(mm->context.acop, mm);
kfree(mm->context.cop_lockp);
mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
if (radix_enabled()) {
/*
* Radix doesn't have a valid bit in the process table
......
@@ -271,44 +271,6 @@ config VSX
If in doubt, say Y here.
config PPC_ICSWX
bool "Support for PowerPC icswx coprocessor instruction"
depends on PPC_BOOK3S_64
default n
---help---
This option enables kernel support for the PowerPC Initiate
Coprocessor Store Word (icswx) coprocessor instruction on POWER7
and POWER8 processors. POWER9 uses new copy/paste instructions
to invoke the coprocessor.
This option is only useful if you have a processor that supports
the icswx coprocessor instruction. It does not have any effect
on processors without the icswx coprocessor instruction.
This option slightly increases kernel memory usage.
If in doubt, say N here.
config PPC_ICSWX_PID
bool "icswx requires direct PID management"
depends on PPC_ICSWX
default y
---help---
The PID register on server processors is used explicitly for ICSWX. On
embedded systems PID management is done by the system.
config PPC_ICSWX_USE_SIGILL
bool "Should a bad CT cause a SIGILL?"
depends on PPC_ICSWX
default n
---help---
Should a bad CT used for a "non-record form" ICSWX cause an
illegal instruction signal, or should it be silent as
architected?
If in doubt, say N here.
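# Example (editor's addition): before this commit a POWER7-era kernel
# wanting userspace icswx support might have carried a .config fragment
# along these lines:
#
#   CONFIG_PPC_ICSWX=y
#   CONFIG_PPC_ICSWX_PID=y
#   # CONFIG_PPC_ICSWX_USE_SIGILL is not set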
config SPE_POSSIBLE
def_bool y
depends on E200 || (E500 && !PPC_E500MC)
......