Commit 7f613c7d authored by Keith Owens, committed by Tony Luck

[PATCH] MCA/INIT: use per cpu stacks

The bulk of the change.  Use per cpu MCA/INIT stacks.  Change the SAL
to OS state (sos) to be per process.  Do all the assembler work on the
MCA/INIT stacks, leaving the original stack alone.  Pass per cpu state
data to the C handlers for MCA and INIT, which also means changing the
mca_drv interfaces slightly.  Lots of verification on whether the
original stack is usable before converting it to a sleeping process.
Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 289d773e
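
For orientation before the diffs: a minimal C sketch of the shape this patch gives the per-CPU MCA/INIT area and the C handler entry points. It is abbreviated from the declarations that appear in full in the include/asm-ia64/mca.h hunk below; it introduces no API beyond what the patch itself declares.

/* Sketch, abbreviated from the definitions introduced by this patch.
 * Each CPU gets one ia64_mca_cpu area containing a full kernel-sized
 * stack per event type, so MCA and INIT handling no longer runs on the
 * interrupted task's stack.
 */
struct ia64_mca_cpu {
        u64 mca_stack[KERNEL_STACK_SIZE/8];     /* task + RBS + stack for MCA */
        u64 init_stack[KERNEL_STACK_SIZE/8];    /* task + RBS + stack for INIT */
};

/* The SAL<->OS handoff data is no longer kept in static variables; it is
 * built on the per-CPU MCA/INIT stack and passed explicitly to the C code.
 */
extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
extern void ia64_init_handler(struct pt_regs *, struct switch_stack *,
                              struct ia64_sal_os_state *);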
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -211,17 +211,41 @@ void foo(void)
 #endif
 	BLANK();
-	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
-	       offsetof (struct ia64_mca_cpu, proc_state_dump));
-	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stack));
-	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stackframe));
-	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
-	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, mca_stack));
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
+	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SIZE,
+	       sizeof (struct ia64_sal_os_state));
+	DEFINE(IA64_PMSA_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_gr));
+	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+	DEFINE(IA64_PMSA_PR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_pr));
+	DEFINE(IA64_PMSA_BR0_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_br0));
+	DEFINE(IA64_PMSA_RSC_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
+	DEFINE(IA64_PMSA_IIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_iip));
+	DEFINE(IA64_PMSA_IPSR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+	DEFINE(IA64_PMSA_IFS_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
+	DEFINE(IA64_PMSA_XIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_xip));
+	BLANK();
 
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
 	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
...
(Two further file diffs in this commit are collapsed and not shown here.)
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -4,6 +4,8 @@
  *
  * Copyright (C) 2004 FUJITSU LIMITED
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #include <linux/config.h>
 #include <linux/types.h>
@@ -38,10 +40,6 @@
 /* max size of SAL error record (default) */
 static int sal_rec_max = 10000;
 
-/* from mca.c */
-static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state;
-static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state;
-
 /* from mca_drv_asm.S */
 extern void *mca_handler_bhhook(void);
@@ -316,7 +314,8 @@ init_record_index_pools(void)
  */
 static mca_type_t
-is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+	      struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
 	 * Therefore it is local MCA when rendezvous has not been requested.
 	 * Failed to rendezvous, the system must be down.
 	 */
-	switch (sal_to_os_handoff_state->imsto_rendez_state) {
+	switch (sos->rv_rc) {
 	case -1: /* SAL rendezvous unsuccessful */
 		return MCA_IS_GLOBAL;
 	case 0: /* SAL rendezvous not required */
@@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci)
  */
 static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			struct ia64_sal_os_state *sos)
 {
 	sal_log_mod_error_info_t *smei;
 	pal_min_state_area_t *pmsa;
@@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
 		 * setup for resume to bottom half of MCA,
 		 * "mca_handler_bhhook"
 		 */
-		pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61));
+		pmsa = sos->pal_min_state;
 		/* pass to bhhook as 1st argument (gr8) */
 		pmsa->pmsa_gr[8-1] = smei->target_identifier;
 		/* set interrupted return address (but no use) */
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
  */
 static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			    struct ia64_sal_os_state *sos)
 {
 	int status = 0;
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 		case 1: /* partial read */
 		case 3: /* full line(cpu) read */
 		case 9: /* I/O space read */
-			status = recover_from_read_error(slidx, peidx, pbci);
+			status = recover_from_read_error(slidx, peidx, pbci, sos);
 			break;
 		case 0: /* unknown */
 		case 2: /* partial write */
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
  */
 static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci)
+recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+			     struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
@@ -545,7 +547,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 	 * This means "there are some platform errors".
 	 */
 	if (platform)
-		return recover_from_platform_error(slidx, peidx, pbci);
+		return recover_from_platform_error(slidx, peidx, pbci, sos);
 	/*
 	 * On account of strange SAL error record, we cannot recover.
 	 */
@@ -562,8 +564,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 static int
 mca_try_to_recover(void *rec,
-		   ia64_mca_sal_to_os_state_t *sal_to_os_state,
-		   ia64_mca_os_to_sal_state_t *os_to_sal_state)
+		   struct ia64_sal_os_state *sos)
 {
 	int platform_err;
 	int n_proc_err;
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec,
 	peidx_table_t peidx;
 	pal_bus_check_info_t pbci;
 
-	/* handoff state from/to mca.c */
-	sal_to_os_handoff_state = sal_to_os_state;
-	os_to_sal_handoff_state = os_to_sal_state;
-
 	/* Make index of SAL error record */
 	platform_err = mca_make_slidx(rec, &slidx);
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec,
 	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
 
 	/* Check whether MCA is global or not */
-	if (is_mca_global(&peidx, &pbci))
+	if (is_mca_global(&peidx, &pbci, sos))
 		return 0;
 
 	/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci);
+	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
 }
 
 /*
...
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -11,8 +11,6 @@
 #ifndef _ASM_IA64_MCA_H
 #define _ASM_IA64_MCA_H
 
-#define IA64_MCA_STACK_SIZE	8192
-
 #if !defined(__ASSEMBLY__)
 
 #include <linux/interrupt.h>
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u {
 
 enum {
 	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
-	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1
+	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
+	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
 };
 
 /* Information maintained by the MC infrastructure */
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s {
 
 } ia64_mc_info_t;
 
-typedef struct ia64_mca_sal_to_os_state_s {
-	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
-	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
-	u64		imsto_sal_proc;		/* SAL_PROC entry point - physical addr */
-	u64		imsto_sal_gp;		/* GP of the SAL - physical */
-	u64		imsto_rendez_state;	/* Rendez state information */
-	u64		imsto_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-	u64		pal_min_state;		/* from PAL in r17 */
-	u64		proc_state_param;	/* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+	/* SAL to OS, must be at offset 0 */
+	u64			os_gp;			/* GP of the os registered with the SAL, physical */
+	u64			pal_proc;		/* PAL_PROC entry point, physical */
+	u64			sal_proc;		/* SAL_PROC entry point, physical */
+	u64			rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
+	u64			proc_state_param;	/* from R18 */
+	u64			monarch;		/* 1 for a monarch event, 0 for a slave */
+	/* common, must follow SAL to OS */
+	u64			sal_ra;			/* Return address in SAL, physical */
+	u64			sal_gp;			/* GP of the SAL - physical */
+	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	u64			prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
+	/* Some interrupt registers are not saved in minstate, pt_regs or
+	 * switch_stack.  Because MCA/INIT can occur when interrupts are
+	 * disabled, we need to save the additional interrupt registers over
+	 * MCA/INIT and resume.
+	 */
+	u64			isr;
+	u64			ifa;
+	u64			itir;
+	u64			iipa;
+	u64			iim;
+	u64			iha;
+	/* OS to SAL, must follow common */
+	u64			os_status;		/* OS status to SAL, enum below */
+	u64			context;		/* 0 if return to same context
+							   1 if return to new context */
+};
 
 enum {
 	IA64_MCA_CORRECTED	=	0x0,	/* Error has been corrected by OS_MCA */
@@ -83,36 +106,22 @@ enum {
 	IA64_MCA_HALT		=	-3	/* System to be halted by SAL */
 };
 
+enum {
+	IA64_INIT_RESUME	=	0x0,	/* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	=	-1,	/* Warm boot of the system need from SAL */
+};
+
 enum {
 	IA64_MCA_SAME_CONTEXT	=	0x0,	/* SAL to return to same context */
 	IA64_MCA_NEW_CONTEXT	=	-1	/* SAL to return to new context */
 };
 
-typedef struct ia64_mca_os_to_sal_state_s {
-	u64		imots_os_status;	/* OS status to SAL as to what happened
-						 * with the MCA handling.
-						 */
-	u64		imots_sal_gp;		/* GP of the SAL - physical */
-	u64		imots_context;		/* 0 if return to same context
-						   1 if return to new context */
-	u64		*imots_new_min_state;	/* Pointer to structure containing
-						 * new values of registers in the min state
-						 * save area.
-						 */
-	u64		imots_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-} ia64_mca_os_to_sal_state_t;
-
 /* Per-CPU MCA state that is too big for normal per-CPU variables.  */
 
 struct ia64_mca_cpu {
-	u64 stack[IA64_MCA_STACK_SIZE/8];	/* MCA memory-stack */
-	u64 proc_state_dump[512];
-	u64 stackframe[32];
-	u64 rbstore[IA64_MCA_STACK_SIZE/8];	/* MCA reg.-backing store */
+	u64 mca_stack[KERNEL_STACK_SIZE/8];
 	u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
+};
 
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+			      struct switch_stack *,
+			      struct ia64_sal_os_state *);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
-extern int  ia64_reg_MCA_extension(void*);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+
+#else	/* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system need from SAL */
+#define IA64_MCA_HALT		-3	/* System to be halted by SAL */
+
+#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+
+#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
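
Since ia64_reg_MCA_extension changes from an untyped void * to a typed callback that also receives the SAL/OS state, here is a hedged sketch of how an external recovery driver could register against the new prototype. The function and variable names are illustrative; mca_drv's real registration call is not part of the excerpt above, and both the -EBUSY interpretation of a non-zero registration return and the "non-zero means recovered" callback convention are assumptions inferred from the mca_drv.c excerpt.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/mca.h>

/* Illustrative recovery callback: it receives the per-event SAL/OS state
 * instead of reaching into file-scope handoff variables (assumed return
 * convention: non-zero means the event was recovered, matching the
 * "return 0" on the unrecoverable paths in the mca_drv.c excerpt above).
 */
static int my_mca_recover(void *rec, struct ia64_sal_os_state *sos)
{
	if (sos->rv_rc == (u64)-1)	/* SAL rendezvous unsuccessful: global MCA */
		return 0;		/* do not claim the event */
	/* ... decode 'rec' (the SAL error record) and attempt recovery ... */
	return 1;
}

static int __init my_recovery_init(void)
{
	/* Assumption: a non-zero return means an extension is already registered. */
	if (ia64_reg_MCA_extension(my_mca_recover))
		return -EBUSY;
	return 0;
}

module_init(my_recovery_init);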
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -8,6 +8,8 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2002 Intel Corp.
  * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
 	;;
 
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
  *
- *	+-----------------------+
- *	|NDIRTY [BSP - BSPSTORE]|
- *	+-----------------------+
- *	|         RNAT          |
- *	+-----------------------+
- *	|       BSPSTORE        |
- *	+-----------------------+
- *	|          IFS          |
- *	+-----------------------+
- *	|          PFS          |
- *	+-----------------------+
- *	|          RSC          |
- *	+-----------------------+ <-------- Bottom of new stack frame
+ *	+---------------------------+
+ *	|          pt_regs          |
+ *	+---------------------------+
+ *	|        switch_stack       |
+ *	+---------------------------+
+ *	|        SAL/OS state       |
+ *	+---------------------------+
+ *	|    16 byte scratch area   |
+ *	+---------------------------+ <-------- SP at start of C MCA handler
+ *	|           .....           |
+ *	+---------------------------+
+ *	|  RBS for MCA/INIT handler |
+ *	+---------------------------+
+ *	| struct task for MCA/INIT  |
+ *	+---------------------------+ <-------- Bottom of MCA/INIT stack
  */
-#define  rse_rsc_offset		0
-#define  rse_pfs_offset		(rse_rsc_offset+0x08)
-#define  rse_ifs_offset		(rse_pfs_offset+0x08)
-#define  rse_bspstore_offset	(rse_ifs_offset+0x08)
-#define  rse_rnat_offset	(rse_bspstore_offset+0x08)
-#define  rse_ndirty_offset	(rse_rnat_offset+0x08)
-
-/*
- * rse_switch_context
- *
- *	1. Save old RSC onto the new stack frame
- *	2. Save PFS onto new stack frame
- *	3. Cover the old frame and start a new frame.
- *	4. Save IFS onto new stack frame
- *	5. Save the old BSPSTORE on the new stack frame
- *	6. Save the old RNAT on the new stack frame
- *	7. Write BSPSTORE with the new backing store pointer
- *	8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
-	;;							\
-	mov	temp=ar.rsc;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.pfs;;					\
-	st8	[p_stackframe]=temp,8;				\
-	cover ;;						\
-	mov	temp=cr.ifs;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.bspstore;;				\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.rnat;;					\
-	st8	[p_stackframe]=temp,8;				\
-	mov	ar.bspstore=p_bspstore;;			\
-	mov	temp=ar.bsp;;					\
-	sub	temp=temp,p_bspstore;;				\
-	st8	[p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *	1. Allocate a zero-sized frame
- *	2. Store the number of dirty registers RSC.loadrs field
- *	3. Issue a loadrs to insure that any registers from the interrupted
- *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers
- *	4. Restore BSPSTORE
- *	5. Restore RNAT
- *	6. Restore PFS
- *	7. Restore IFS
- *	8. Restore RSC
- *	9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)	\
-	;;							\
-	alloc	temp=ar.pfs,0,0,0,0;				\
-	add	p_stackframe=rse_ndirty_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	shl	temp=temp,16;;					\
-	mov	ar.rsc=temp;;					\
-	loadrs;;						\
-	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.bspstore=temp;;				\
-	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rnat=temp;;					\
-	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.pfs=temp;;					\
-	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	cr.ifs=temp;;					\
-	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rsc=temp ;					\
-	mov	temp=psr;;					\
-	or	temp=temp,psr_mask_reg;;			\
-	mov	cr.ipsr=temp;;					\
-	mov	temp=ip;;					\
-	add	temp=0x30,temp;;				\
-	mov	cr.iip=temp;;					\
-	srlz.i;;						\
-	rfi;;
+
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
 
 #endif /* _ASM_IA64_MCA_ASM_H */
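
As a quick sanity check of the ALIGN16 offset chain introduced above, here is a small userspace sketch of the arithmetic. The numeric sizes are placeholder assumptions (the real values come from the kernel configuration and from asm-offsets.c); only the rounding rule and the ordering of the regions mirror the patch.

#include <stdio.h>

/* Same rounding rule as the patch: round down to a 16-byte boundary. */
#define ALIGN16(x)	((x) & ~15UL)

int main(void)
{
	/* Placeholder sizes, for illustration only. */
	unsigned long kernel_stack_size = 32768;  /* assumed KERNEL_STACK_SIZE */
	unsigned long pt_regs_size      = 400;    /* assumed IA64_PT_REGS_SIZE */
	unsigned long switch_stack_size = 560;    /* assumed IA64_SWITCH_STACK_SIZE */
	unsigned long sal_os_state_size = 136;    /* assumed IA64_SAL_OS_STATE_SIZE */

	/* Regions are carved from the top of the per-CPU MCA/INIT stack,
	 * working downwards: pt_regs, then switch_stack, then SAL/OS state,
	 * then a 16-byte scratch area where the C handler's stack pointer
	 * starts, exactly as in the layout diagram above.
	 */
	unsigned long pt_regs_off      = ALIGN16(kernel_stack_size - pt_regs_size);
	unsigned long switch_stack_off = ALIGN16(pt_regs_off - switch_stack_size);
	unsigned long sos_off          = ALIGN16(switch_stack_off - sal_os_state_size);
	unsigned long sp_off           = ALIGN16(sos_off - 16);

	printf("pt_regs      at offset 0x%lx\n", pt_regs_off);
	printf("switch_stack at offset 0x%lx\n", switch_stack_off);
	printf("SAL/OS state at offset 0x%lx\n", sos_off);
	printf("initial SP   at offset 0x%lx\n", sp_off);
	return 0;
}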