Commit 9726bfcd authored by Christoph Hellwig, committed by Tony Luck

misc/sgi-xp: remove SGI SN2 support

Note this also marks xp broken on ia64 now, as the UV support, which was
disabled in generic kernels before, actually never compiled due to the
undefined uv_gpa_to_soc_phys_ram and uv_gpa_in_mmr_space symbols since at
least commit c2c9f115 ("x86: uv: update XPC to handle updated BIOS
interface").
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lkml.kernel.org/r/20190813072514.23299-11-hch@lst.de
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 0fef2532
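
With the SN2/shub code paths gone, the driver's runtime platform dispatch reduces to a single is_uv() check, as the xp_init()/xp_exit() and xpc hunks below show. A condensed sketch of the resulting dispatch, assuming only the helper names visible in those hunks (not the verbatim kernel source):

/*
 * Editorial sketch, not part of the diff: the post-patch dispatch keeps
 * only the UV branch.  The *_sketch names are hypothetical; is_uv(),
 * xp_init_uv() and xp_exit_uv() are the helpers shown in the hunks below.
 */
static enum xp_retval xp_platform_init_sketch(void)
{
	if (is_uv())
		return xp_init_uv();	/* UV hardware: register the uv ops */
	return xpSuccess;		/* no supported platform: no-op load */
}

static void xp_platform_exit_sketch(void)
{
	if (is_uv())
		xp_exit_uv();
}
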
@@ -19,12 +19,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
#ifdef CONFIG_IA64_SGI_UV
int sn_prom_type;
long sn_partition_id;
EXPORT_SYMBOL(sn_partition_id);
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL(sn_region_size);
#endif
struct redir_addr {
......
@@ -200,9 +200,8 @@ config ENCLOSURE_SERVICES
config SGI_XP
tristate "Support communication between SGI SSIs"
depends on NET
depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
depends on (IA64_GENERIC || IA64_SGI_UV || X86_UV) && SMP
depends on X86_64 || BROKEN
select SGI_GRU if X86_64 && SMP
---help---
An SGI machine can be divided into multiple Single System
......
@@ -4,17 +4,10 @@
#
obj-$(CONFIG_SGI_XP) += xp.o
xp-y := xp_main.o
xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o
xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o
xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o
xp-$(CONFIG_X86_64) += xp_uv.o
xp-y := xp_main.o xp_uv.o
obj-$(CONFIG_SGI_XP) += xpc.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o
xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o
xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o
xpc-$(CONFIG_X86_64) += xpc_uv.o
xpc-y := xpc_main.o xpc_channel.o xpc_partition.o \
xpc_uv.o
obj-$(CONFIG_SGI_XP) += xpnet.o
@@ -24,23 +24,6 @@
#define is_uv() 0
#endif
#if defined CONFIG_IA64
#include <asm/sn/arch.h> /* defines is_shub1() and is_shub2() */
#define is_shub() ia64_platform_is("sn2")
#endif
#ifndef is_shub1
#define is_shub1() 0
#endif
#ifndef is_shub2
#define is_shub2() 0
#endif
#ifndef is_shub
#define is_shub() 0
#endif
#ifdef USE_DBUG_ON
#define DBUG_ON(condition) BUG_ON(condition)
#else
@@ -360,9 +343,7 @@ extern int xp_nofault_PIOR(void *);
extern int xp_error_PIOR(void);
extern struct device *xp;
extern enum xp_retval xp_init_sn2(void);
extern enum xp_retval xp_init_uv(void);
extern void xp_exit_sn2(void);
extern void xp_exit_uv(void);
#endif /* _DRIVERS_MISC_SGIXP_XP_H */
@@ -233,9 +233,7 @@ xp_init(void)
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
if (is_shub())
ret = xp_init_sn2();
else if (is_uv())
if (is_uv())
ret = xp_init_uv();
else
ret = 0;
@@ -251,9 +249,7 @@ module_init(xp_init);
void __exit
xp_exit(void)
{
if (is_shub())
xp_exit_sn2();
else if (is_uv())
if (is_uv())
xp_exit_uv();
}
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
* and attempts to load and consume a value from it. This function
* will be registered as a nofault code block. In the event that the
* PIO read fails, the MCA handler will force the error to look
* corrected and vector to the xp_error_PIOR which will return an error.
*
* The definition of "consumption" and the time it takes for an MCA
* to surface is processor implementation specific. This code
* is sufficient on Itanium through the Montvale processor family.
* It may need to be adjusted for future processor implementations.
*
* extern int xp_nofault_PIOR(void *remote_register);
*/
.global xp_nofault_PIOR
xp_nofault_PIOR:
mov r8=r0 // Stage a success return value
ld8.acq r9=[r32];; // PIO Read the specified register
adds r9=1,r9;; // Add to force consumption
srlz.i;; // Allow time for MCA to surface
br.ret.sptk.many b0;; // Return success
.global xp_error_PIOR
xp_error_PIOR:
mov r8=1 // Return value of 1
br.ret.sptk.many b0;; // Return failure
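
The header comment above spells out the contract: xp_nofault_PIOR() returns 0 when the remote PIO read is consumed, and the MCA handler vectors to xp_error_PIOR() so the call returns 1 when the target is unreachable. A hedged C illustration of a caller consuming that contract (probe_remote_register() is a hypothetical name; the extern prototypes appear in the xp.h hunk earlier in this diff, and xpPioReadError is assumed to be one of the xp_retval codes):

/*
 * Editorial sketch, not part of the removed file: map the nofault
 * PIO-read result onto the driver's xp_retval codes.
 */
static enum xp_retval probe_remote_register(void *remote_reg)
{
	/* 0 = read consumed, nonzero = MCA handler reported the fault */
	if (xp_nofault_PIOR(remote_reg) != 0)
		return xpPioReadError;	/* assumed xp_retval member */
	return xpSuccess;
}
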
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition (XP) sn2-based functions.
*
* Architecture specific implementation of common functions.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include "xp.h"
/*
* The export of xp_nofault_PIOR needs to happen here since it is defined
* in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
* defined here.
*/
EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
u64 xp_nofault_PIOR_target;
EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
/*
* Register a nofault code region which performs a cross-partition PIO read.
* If the PIO read times out, the MCA handler will consume the error and
* return to a kernel-provided instruction to indicate an error. This PIO read
* exists because it is guaranteed to timeout if the destination is down
* (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
* which unfortunately we have to work around).
*/
static enum xp_retval
xp_register_nofault_code_sn2(void)
{
int ret;
u64 func_addr;
u64 err_func_addr;
func_addr = *(u64 *)xp_nofault_PIOR;
err_func_addr = *(u64 *)xp_error_PIOR;
ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
1, 1);
if (ret != 0) {
dev_err(xp, "can't register nofault code, error=%d\n", ret);
return xpSalError;
}
/*
* Setup the nofault PIO read target. (There is no special reason why
* SH_IPI_ACCESS was selected.)
*/
if (is_shub1())
xp_nofault_PIOR_target = SH1_IPI_ACCESS;
else if (is_shub2())
xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
return xpSuccess;
}
static void
xp_unregister_nofault_code_sn2(void)
{
u64 func_addr = *(u64 *)xp_nofault_PIOR;
u64 err_func_addr = *(u64 *)xp_error_PIOR;
/* unregister the PIO read nofault code region */
(void)sn_register_nofault_code(func_addr, err_func_addr,
err_func_addr, 1, 0);
}
/*
* Convert a virtual memory address to a physical memory address.
*/
static unsigned long
xp_pa_sn2(void *addr)
{
return __pa(addr);
}
/*
* Convert a global physical to a socket physical address.
*/
static unsigned long
xp_socket_pa_sn2(unsigned long gpa)
{
return gpa;
}
/*
* Wrapper for bte_copy().
*
* dst_pa - physical address of the destination of the transfer.
* src_pa - physical address of the source of the transfer.
* len - number of bytes to transfer from source to destination.
*
* Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
*/
static enum xp_retval
xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
size_t len)
{
bte_result_t ret;
ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
if (ret == BTE_SUCCESS)
return xpSuccess;
if (is_shub2()) {
dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
"0x%016lx src_pa=0x%016lx len=%ld\\n", ret, dst_pa,
src_pa, len);
} else {
dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
"src_pa=0x%016lx len=%ld\\n", ret, dst_pa, src_pa, len);
}
return xpBteCopyError;
}
static int
xp_cpu_to_nasid_sn2(int cpuid)
{
return cpuid_to_nasid(cpuid);
}
static enum xp_retval
xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
{
u64 nasid_array = 0;
int ret;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
&nasid_array);
if (ret != 0) {
dev_err(xp, "sn_change_memprotect(,, "
"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
return xpSalError;
}
return xpSuccess;
}
static enum xp_retval
xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
{
u64 nasid_array = 0;
int ret;
ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
&nasid_array);
if (ret != 0) {
dev_err(xp, "sn_change_memprotect(,, "
"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
return xpSalError;
}
return xpSuccess;
}
enum xp_retval
xp_init_sn2(void)
{
BUG_ON(!is_shub());
xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
xp_partition_id = sn_partition_id;
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;
xp_restrict_memprotect = xp_restrict_memprotect_sn2;
return xp_register_nofault_code_sn2();
}
void
xp_exit_sn2(void)
{
BUG_ON(!is_shub());
xp_unregister_nofault_code_sn2();
}
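
xp_init_sn2() above shows the driver's indirection scheme: each backend fills in a set of global function pointers (xp_pa, xp_socket_pa, xp_remote_memcpy, and so on) and the common code calls through them without knowing which platform populated them; after this commit only the xp_init_uv() path below does the populating. A hedged sketch of a backend-agnostic caller (copy_to_partition() is a hypothetical name; the pointer names and signatures are the ones assigned above and in the xp_init_uv() hunk below):

/*
 * Editorial sketch, not part of the commit: call through the
 * backend-populated function pointers.
 */
static enum xp_retval copy_to_partition(void *local_buf,
					unsigned long remote_gpa, size_t len)
{
	unsigned long src_pa = xp_pa(local_buf);	 /* virtual -> physical */
	unsigned long dst_pa = xp_socket_pa(remote_gpa); /* global -> socket PA */

	/* per the comment above, never call this while holding a spinlock */
	return xp_remote_memcpy(dst_pa, src_pa, len);
}
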
@@ -151,9 +151,10 @@ xp_init_uv(void)
BUG_ON(!is_uv());
xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
#ifdef CONFIG_X86
xp_partition_id = sn_partition_id;
xp_region_size = sn_region_size;
#endif
xp_pa = xp_pa_uv;
xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
......
@@ -279,13 +279,6 @@ xpc_hb_checker(void *ignore)
dev_dbg(xpc_part, "checking remote heartbeats\n");
xpc_check_remote_hb();
/*
* On sn2 we need to periodically recheck to ensure no
* IRQ/amo pairs have been missed.
*/
if (is_shub())
force_IRQ = 1;
}
/* check for outstanding IRQs */
@@ -1050,9 +1043,7 @@ xpc_do_exit(enum xp_retval reason)
xpc_teardown_partitions();
if (is_shub())
xpc_exit_sn2();
else if (is_uv())
if (is_uv())
xpc_exit_uv();
}
@@ -1235,21 +1226,7 @@ xpc_init(void)
dev_set_name(xpc_part, "part");
dev_set_name(xpc_chan, "chan");
if (is_shub()) {
/*
* The ia64-sn2 architecture supports at most 64 partitions.
* And the inability to unregister remote amos restricts us
* further to only support exactly 64 partitions on this
* architecture, no less.
*/
if (xp_max_npartitions != 64) {
dev_err(xpc_part, "max #of partitions not set to 64\n");
ret = -EINVAL;
} else {
ret = xpc_init_sn2();
}
} else if (is_uv()) {
if (is_uv()) {
ret = xpc_init_uv();
} else {
@@ -1335,9 +1312,7 @@ xpc_init(void)
xpc_teardown_partitions();
out_1:
if (is_shub())
xpc_exit_sn2();
else if (is_uv())
if (is_uv())
xpc_exit_uv();
return ret;
}
......
@@ -93,10 +93,6 @@ xpc_get_rsvd_page_pa(int nasid)
if (ret != xpNeedMoreInfo)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
if (is_shub())
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
@@ -452,7 +448,6 @@ xpc_discovery(void)
case 32:
max_regions *= 2;
region_size = 16;
DBUG_ON(!is_shub2());
}
}
......
@@ -48,6 +48,8 @@ struct uv_IO_APIC_route_entry {
__reserved_2 : 15,
dest : 32;
};
#define sn_partition_id 0
#endif
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
......
@@ -515,7 +515,7 @@ xpnet_init(void)
{
int result;
if (!is_shub() && !is_uv())
if (!is_uv())
return -ENODEV;
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
......