Commit d44aa798 authored by Jack Steiner, committed by Tony Luck

[IA64-SGI] Add support for a future SGI chipset (shub2) 3of4

Change the IPI & TLB flushing code so that it works on
both shub1 & shub2.
Signed-off-by: Jack Steiner <steiner@sgi.com>
parent 2ce29370
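
As a reading aid before the diff (which covers the deadlock-recovery assembly, the SN2 TLB-purge C code, and rw_mmr.h), here is a minimal C sketch, not part of the patch, of the chipset split it introduces when setting up the purge registers: shub1 drives the purge through the SH1_PTC_0/SH1_PTC_1 register pair, while shub2 uses a single SH2_PTC register and leaves ptc1 NULL so later code can skip the second MMR. The helper name is hypothetical; the constants and expressions are the ones that appear in the patch.

/* Illustrative sketch only -- condensed from the patch below, not kernel source. */
static void setup_ptc_regs_sketch(unsigned long start, unsigned long nbits,
				  unsigned long *data0,
				  volatile unsigned long **ptc0,
				  volatile unsigned long **ptc1)
{
	if (is_shub1()) {
		*data0 = (1UL << SH1_PTC_0_A_SHFT) |
			 (nbits << SH1_PTC_0_PS_SHFT) |
			 ((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
			 (1UL << SH1_PTC_0_START_SHFT);
		*ptc0 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
		*ptc1 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
	} else {
		/* shub2: one PTC register; the RID is folded into the MMR address
		 * and the target address is merged into data0 inside the purge loop. */
		*data0 = (1UL << SH2_PTC_A_SHFT) |
			 (nbits << SH2_PTC_PS_SHFT) |
			 (1UL << SH2_PTC_START_SHFT);
		*ptc0 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0,
				SH2_PTC + ((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT));
		*ptc1 = NULL;	/* tells the write/recovery paths to skip the second MMR */
	}
}
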
@@ -8,9 +8,8 @@
#include <asm/sn/shub_mmr.h>
#define ZEROVAL 0x3f // "zero" value for outstanding PIO requests
#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
#define WRITECOUNT SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT
#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
#define ALIAS_OFFSET (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
@@ -18,24 +17,24 @@
.proc sn2_ptc_deadlock_recovery_core
sn2_ptc_deadlock_recovery_core:
.regstk 5,0,0,0
.regstk 6,0,0,0
ptc0 = in0
data0 = in1
ptc1 = in2
data1 = in3
piowc = in4
zeroval = in5
piowcphy = r30
psrsave = r2
zeroval = r3
scr1 = r16
scr2 = r17
mask = r18
extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
dep piowcphy=-1,piowcphy,63,1
mov zeroval=ZEROVAL // "zero" value for PIO write count
movl mask=WRITECOUNTMASK
1:
add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
@@ -43,7 +42,7 @@ sn2_ptc_deadlock_recovery_core:
st8.rel [scr2]=scr1;;
5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
extr.u scr2=scr1,WRITECOUNT,7;;// PIO count
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b
@@ -57,16 +56,17 @@ sn2_ptc_deadlock_recovery_core:
st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
extr.u scr2=scr1,WRITECOUNT,7;;// PIO count
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b;;
tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
extr.u scr2=scr1,WRITECOUNT,7;;// PIO count
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b
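
The assembly change above replaces the fixed-position extract (extr.u at the WRITECOUNT shift, compared against the shub1-only constant ZEROVAL) with a mask against WRITECOUNTMASK and a compare against a zeroval supplied by the caller. The C side below (wait_piowc()) makes the same switch. A small sketch of the shared test, with a hypothetical helper name:

/* Sketch only: mask the PIO write-status word and compare against the
 * chipset-specific "zero" value taken from the pda, instead of comparing a
 * fixed 7-bit field with a shub1-only constant. */
static inline int sn2_pio_writes_done(unsigned long ws, unsigned long zeroval)
{
	return (ws & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) == zeroval;
}
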
@@ -38,7 +38,8 @@
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1);
void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0,
volatile unsigned long *, unsigned long data1);
static spinlock_t sn2_global_ptc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
@@ -46,14 +47,14 @@ static unsigned long sn2_ptc_deadlock_count;
static inline unsigned long wait_piowc(void)
{
volatile unsigned long *piows, piows_val;
volatile unsigned long *piows, zeroval;
unsigned long ws;
piows = pda->pio_write_status_addr;
piows_val = pda->pio_write_status_val;
zeroval = pda->pio_write_status_val;
do {
cpu_relax();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != piows_val);
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
return ws;
}
@@ -87,9 +88,9 @@ void
sn2_global_tlb_purge(unsigned long start, unsigned long end,
unsigned long nbits)
{
int i, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
volatile unsigned long *ptc0, *ptc1;
unsigned long flags = 0, data0, data1;
unsigned long flags = 0, data0 = 0, data1 = 0;
struct mm_struct *mm = current->active_mm;
short nasids[NR_NODES], nix;
DECLARE_BITMAP(nodes_flushed, NR_NODES);
@@ -128,28 +129,42 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
cnode = find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
nasids[nix++] = cnodeid_to_nasid(cnode);
data0 = (1UL << SH1_PTC_0_A_SHFT) |
(nbits << SH1_PTC_0_PS_SHFT) |
((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
(1UL << SH1_PTC_0_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
shub1 = is_shub1();
if (shub1) {
data0 = (1UL << SH1_PTC_0_A_SHFT) |
(nbits << SH1_PTC_0_PS_SHFT) |
((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
(1UL << SH1_PTC_0_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
} else {
data0 = (1UL << SH2_PTC_A_SHFT) |
(nbits << SH2_PTC_PS_SHFT) |
(1UL << SH2_PTC_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
ptc1 = NULL;
}
mynasid = get_nasid();
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
do {
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
if (shub1)
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
else
data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
for (i = 0; i < nix; i++) {
nasid = nasids[i];
if (likely(nasid == mynasid)) {
if (unlikely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
ptc1 = CHANGE_NASID(nasid, ptc1);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
data1);
flushed = 1;
@@ -159,7 +174,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
if (flushed
&& (wait_piowc() &
SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
sn2_ptc_deadlock_recovery(data0, data1);
sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
}
start += (1UL << nbits);
@@ -178,18 +193,19 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end,
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
volatile unsigned long *ptc1, unsigned long data1)
{
extern void sn2_ptc_deadlock_recovery_core(long *, long, long *, long,
long *);
extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
int cnode, mycnode, nasid;
long *ptc0, *ptc1, *piows;
volatile unsigned long *piows;
volatile unsigned long zeroval;
sn2_ptc_deadlock_count++;
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
piows = (long *)pda->pio_write_status_addr;
piows = pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
mycnode = numa_node_id();
@@ -198,8 +214,9 @@ void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
continue;
nasid = cnodeid_to_nasid(cnode);
ptc0 = CHANGE_NASID(nasid, ptc0);
ptc1 = CHANGE_NASID(nasid, ptc1);
sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
}
}
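
As the comment above says, the recovery sequence itself stays in assembly; only the C wrapper changes shape. A condensed, illustrative rendering of the reshaped wrapper (the per-node loop bound is assumed, since the hunk does not show it) highlights the two points of the change: the PTC MMR addresses now arrive from the caller instead of being recomputed from SH1_* constants, and a NULL ptc1 (shub2) is passed through so the assembly core can skip the second register.

/* Condensed, illustrative rendering -- not the exact kernel source. */
static void ptc_deadlock_recovery_sketch(volatile unsigned long *ptc0, unsigned long data0,
					 volatile unsigned long *ptc1, unsigned long data1)
{
	volatile unsigned long *piows = pda->pio_write_status_addr;
	unsigned long zeroval = pda->pio_write_status_val;
	int cnode, nasid, mycnode = numa_node_id();

	for (cnode = 0; cnode < numnodes; cnode++) {	/* loop bound assumed; not shown in the hunk */
		if (cnode == mycnode)
			continue;
		nasid = cnodeid_to_nasid(cnode);
		ptc0 = CHANGE_NASID(nasid, ptc0);
		if (ptc1)		/* NULL on shub2: only one PTC register */
			ptc1 = CHANGE_NASID(nasid, ptc1);
		sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
	}
}
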
@@ -14,8 +14,8 @@
* uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 2 MMRs with psr.ic=0
* (interrupt collection)
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
* Second MMR will be skipped if address is NULL
*
* Addresses passed to these routines should be uncached physical addresses
* ie., 0x80000....
@@ -61,13 +61,14 @@ pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2,
asm volatile
("mov r2=psr;;"
"rsm psr.i | psr.dt | psr.ic;;"
"cmp.ne p9,p0=%2,r0;"
"srlz.i;;"
"st8.rel [%0]=%1;"
"st8.rel [%2]=%3;;"
"(p9) st8.rel [%2]=%3;;"
"mov psr.l=r2;;"
"srlz.i;;"
:: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
: "r2", "memory");
: "p9", "r2", "memory");
}
#endif /* _ASM_IA64_SN_RW_MMR_H */
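
The rw_mmr.h change above is what lets a NULL second MMR flow all the way down: the second store is now predicated (p9) on mmr2 being non-NULL. A hedged usage sketch follows; the wrapper name is hypothetical, and the call itself mirrors the purge loop earlier in the patch.

/* Illustrative caller only: with this patch the call site no longer needs to
 * care which chipset it is on -- a NULL ptc1 (shub2) simply causes the
 * predicated second store to be skipped inside pio_atomic_phys_write_mmrs(). */
static void ptc_issue_example(volatile long *ptc0, long data0,
			      volatile long *ptc1, long data1)
{
	/* ptc1 is the SH1_PTC_1 address on shub1 and NULL on shub2 */
	pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
}
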