Commit 3dbdb149, authored Feb 10, 2004 by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

parents 2f57572d 7baf73ec

Showing 16 changed files with 860 additions and 1799 deletions (+860 -1799)
arch/ia64/defconfig                  +29   -31
arch/ia64/hp/common/sba_iommu.c      +230  -113
arch/ia64/kernel/efivars.c           +42   -1
arch/ia64/kernel/mca.c               +418  -1514
arch/ia64/kernel/salinfo.c           +42   -2
arch/ia64/kernel/smpboot.c           +0    -1
arch/ia64/kernel/traps.c             +2    -9
arch/ia64/kernel/unaligned.c         +17   -27
arch/ia64/lib/io.c                   +8    -8
arch/ia64/sn/kernel/mca.c            +0    -14
arch/ia64/sn/kernel/sn2/sn2_smp.c    +52   -8
include/asm-ia64/fpswa.h             +2    -0
include/asm-ia64/mca.h               +1    -57
include/asm-ia64/mmu_context.h       +3    -0
include/asm-ia64/percpu.h            +1    -1
include/asm-ia64/processor.h         +13   -13
arch/ia64/defconfig
...
@@ -48,13 +48,14 @@ CONFIG_64BIT=y
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_EFI=y
# CONFIG_ITANIUM is not set
CONFIG_MCKINLEY=y
# CONFIG_IA64_GENERIC is not set
# CONFIG_IA64_DIG is not set
# CONFIG_IA64_HP_SIM is not set
CONFIG_IA64_HP_ZX1=y
# CONFIG_IA64_SGI_SN2 is not set
# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
...
...
@@ -80,15 +81,14 @@ CONFIG_HUGETLB_PAGE_SIZE_64MB=y
# CONFIG_HUGETLB_PAGE_SIZE_256KB is not set
# CONFIG_IA64_PAL_IDLE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=16
# CONFIG_PREEMPT is not set
CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_HAVE_DEC_LOCK=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_EFI=y
CONFIG_EFI_VARS=y
CONFIG_NR_CPUS=16
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
...
...
@@ -140,7 +140,6 @@ CONFIG_HOTPLUG=y
#
# Plug and Play support
#
# CONFIG_PNP is not set
#
# Block devices
...
...
@@ -179,6 +178,7 @@ CONFIG_IDE_TASKFILE_IO=y
#
# IDE chipset support/bugfixes
#
# CONFIG_IDE_GENERIC is not set
CONFIG_BLK_DEV_IDEPCI=y
CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
...
...
@@ -223,7 +223,6 @@ CONFIG_IDEDMA_AUTO=y
#
# I2O device support
#
# CONFIG_I2O is not set
#
# Multi-device support (RAID and LVM)
...
...
@@ -234,6 +233,7 @@ CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID5=m
# CONFIG_MD_RAID6 is not set
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_IOCTL_V4=y
...
...
@@ -303,9 +303,15 @@ CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_SCSI_QLA2XXX=y
# CONFIG_SCSI_QLA21XX is not set
# CONFIG_SCSI_QLA22XX is not set
# CONFIG_SCSI_QLA2300 is not set
# CONFIG_SCSI_QLA2322 is not set
# CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_QLA6322 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
#
...
...
@@ -414,6 +420,7 @@ CONFIG_NET_PCI=y
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=y
# CONFIG_EEPRO100_PIO is not set
...
...
@@ -539,8 +546,8 @@ CONFIG_HW_CONSOLE=y
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_ACPI=y
CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_ACPI=y
CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
...
...
@@ -552,24 +559,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_UNIX98_PTY_COUNT=256
#
# I2C support
#
# CONFIG_I2C is not set
#
# I2C Algorithms
#
#
# I2C Hardware Bus support
#
#
# I2C Hardware Sensors Chip support
#
# CONFIG_I2C_SENSOR is not set
#
# Mice
#
...
...
@@ -609,6 +598,11 @@ CONFIG_DRM_RADEON=m
# CONFIG_DRM_SIS is not set
# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C is not set
#
# Multimedia devices
#
...
...
@@ -789,6 +783,7 @@ CONFIG_FB_RADEON=y
# CONFIG_FB_ATY is not set
# CONFIG_FB_SIS is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_TRIDENT is not set
...
...
@@ -844,6 +839,7 @@ CONFIG_SND_SEQUENCER=m
#
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_EMU10K1 is not set
...
...
@@ -927,7 +923,6 @@ CONFIG_USB_HIDDEV=y
# USB Imaging devices
#
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
...
...
@@ -961,12 +956,19 @@ CONFIG_USB_HIDDEV=y
#
# USB Miscellaneous drivers
#
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_TIGL is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_BRLVGER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
#
# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
#
...
...
@@ -988,10 +990,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_IA64_PRINT_HAZARDS=y
# CONFIG_DISABLE_VHPT is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_IA64_EARLY_PRINTK=y
CONFIG_IA64_EARLY_PRINTK_UART=y
CONFIG_IA64_EARLY_PRINTK_UART_BASE=0xff5e0000
CONFIG_IA64_EARLY_PRINTK_VGA=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
...
...
arch/ia64/hp/common/sba_iommu.c
...
@@ -57,10 +57,20 @@
 ** There's potentially a conflict in the bio merge code with us
 ** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
 ** appears to give more performance than bio-level virtual merging, we'll
-** do the former for now.
+** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
+** completely restrict DMA to the IOMMU.
 */
 #define ALLOW_IOV_BYPASS

+/*
+** This option specifically allows/disallows bypassing scatterlists with
+** multiple entries.  Coalescing these entries can allow better DMA streaming
+** and in some cases shows better performance than entirely bypassing the
+** IOMMU.  Performance increase on the order of 1-2% sequential output/input
+** using bonnie++ on a RAID0 MD device (sym2 & mpt).
+*/
+#undef ALLOW_IOV_BYPASS_SG
+
 /*
 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
 ** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
...
...
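A short aside on the bypass rule this hunk refines: a device may skip the IOMMU only when the buffer's physical address fits entirely under its DMA mask. A minimal illustrative sketch (plain C, not code from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: bypass is safe iff no address bit falls outside
     * the device's DMA mask; otherwise the access must be mapped through
     * the IOMMU's I/O page table (pdir). */
    static bool can_bypass_iommu(uint64_t phys_addr, uint64_t dma_mask)
    {
            return (phys_addr & ~dma_mask) == 0;
    }

The new ALLOW_IOV_BYPASS_SG knob applies the same test to multi-entry scatterlists, trading the IOMMU's virtual merging against bypass latency.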
@@ -75,7 +85,10 @@
 #define ENABLE_MARK_CLEAN

 /*
-** The number of debug flags is a clue - this code is fragile.
+** The number of debug flags is a clue - this code is fragile.  NOTE: since
+** tightening the use of res_lock the resource bitmap and actual pdir are no
+** longer guaranteed to stay in sync. The sanity checking code isn't going to
+** like that.
 */
 #undef DEBUG_SBA_INIT
 #undef DEBUG_SBA_RUN
...
...
@@ -140,9 +153,7 @@
 ** allocated and free'd/purged at a time might make this
 ** less interesting).
 */
-#define DELAYED_RESOURCE_CNT	16
-
-#define DEFAULT_DMA_HINT_REG	0
+#define DELAYED_RESOURCE_CNT	64

 #define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
 #define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
...
...
@@ -187,13 +198,14 @@ struct ioc {
 	unsigned long	imask;		/* pdir IOV Space mask */

 	unsigned long	*res_hint;	/* next avail IOVP - circular search */
-	spinlock_t	res_lock;
-	unsigned long	hint_mask_pdir;	/* bits used for DMA hints */
+	unsigned long	dma_mask;
+	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
+					/* clearing pdir to prevent races with allocations. */
 	unsigned int	res_bitshift;	/* from the RIGHT! */
 	unsigned int	res_size;	/* size of resource map in bytes */
-	unsigned int	hint_shift_pdir;
-	unsigned long	dma_mask;
 #if DELAYED_RESOURCE_CNT > 0
+	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
+					/* than res_lock for bigger systems. */
 	int		saved_cnt;
 	struct sba_dma_pair {
 		dma_addr_t	iova;
...
...
@@ -221,6 +233,9 @@ struct ioc {
 static struct ioc *ioc_list;
 static int reserve_sba_gart = 1;

+static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
+static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
+
 #define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)

 #ifdef FULL_VALID_PDIR
...
...
@@ -405,7 +420,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

 /* Convert from IOVP to IOVA and vice versa. */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
+#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

 #define PDIR_ENTRY_SIZE	sizeof(u64)
...
...
@@ -453,20 +468,25 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);

-	if (bits_wanted > (BITS_PER_LONG/2)) {
-		/* Search word at a time - no mask needed */
-		for(; res_ptr < res_end; ++res_ptr) {
-			if (*res_ptr == 0) {
-				*res_ptr = RESMAP_MASK(bits_wanted);
+	if (likely(bits_wanted == 1)) {
+		unsigned int bitshiftcnt;
+		for(; res_ptr < res_end ; res_ptr++) {
+			if (likely(*res_ptr != ~0UL)) {
+				bitshiftcnt = ffz(*res_ptr);
+				*res_ptr |= (1UL << bitshiftcnt);
 				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
 				pide <<= 3;	/* convert to bit address */
-				break;
+				pide += bitshiftcnt;
+				ioc->res_bitshift = bitshiftcnt + bits_wanted;
+				goto found_it;
 			}
 		}
-		/* point to the next word on next pass */
-		res_ptr++;
-		ioc->res_bitshift = 0;
-	} else {
+		goto not_found;
+
+	} else if (likely(bits_wanted <= BITS_PER_LONG/2)) {
 		/*
 		** Search the resource bit map on well-aligned values.
 		** "o" is the alignment.
...
...
@@ -475,45 +495,72 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 		*/
 		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
 		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
-		unsigned long mask;
+		unsigned long mask, base_mask;

 		if (bitshiftcnt >= BITS_PER_LONG) {
 			bitshiftcnt = 0;
 			res_ptr++;
 		}
-		mask = RESMAP_MASK(bits_wanted) << bitshiftcnt;
+		base_mask = RESMAP_MASK(bits_wanted);
+		mask = base_mask << bitshiftcnt;

 		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
-		while (res_ptr < res_end)
+		for(; res_ptr < res_end ; res_ptr++)
 		{
 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 			ASSERT(0 != mask);
-			if (0 == ((*res_ptr) & mask)) {
-				*res_ptr |= mask;     /* mark resources busy! */
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
-				break;
-			}
-			mask <<= o;
-			bitshiftcnt += o;
-			if (0 == mask) {
-				mask = RESMAP_MASK(bits_wanted);
-				bitshiftcnt = 0;
-				res_ptr++;
+			for (; mask ; mask <<= o, bitshiftcnt += o) {
+				if (0 == ((*res_ptr) & mask)) {
+					*res_ptr |= mask;     /* mark resources busy! */
+					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
+					pide <<= 3;	/* convert to bit address */
+					pide += bitshiftcnt;
+					ioc->res_bitshift = bitshiftcnt + bits_wanted;
+					goto found_it;
+				}
 			}
+			bitshiftcnt = 0;
+			mask = base_mask;
 		}
-		/* look in the same word on the next pass */
-		ioc->res_bitshift = bitshiftcnt + bits_wanted;
-	}

-	/* wrapped ? */
-	if (res_end <= res_ptr) {
-		ioc->res_hint = (unsigned long *) ioc->res_map;
-		ioc->res_bitshift = 0;
 	} else {
-		ioc->res_hint = res_ptr;
+		int qwords, bits, i;
+		unsigned long *end;
+
+		qwords = bits_wanted >> 6; /* /64 */
+		bits = bits_wanted - (qwords * BITS_PER_LONG);
+
+		end = res_end - qwords;
+
+		for (; res_ptr < end; res_ptr++) {
+			for (i = 0 ; i < qwords ; i++) {
+				if (res_ptr[i] != 0)
+					goto next_ptr;
+			}
+			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
+				continue;
+
+			/* Found it, mark it */
+			for (i = 0 ; i < qwords ; i++)
+				res_ptr[i] = ~0UL;
+			res_ptr[i] |= RESMAP_MASK(bits);
+
+			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
+			pide <<= 3;	/* convert to bit address */
+			res_ptr += qwords;
+			ioc->res_bitshift = bits;
+			goto found_it;
+next_ptr:
+			;
+		}
 	}
+
+not_found:
+	prefetch(ioc->res_map);
+	ioc->res_hint = (unsigned long *) ioc->res_map;
+	ioc->res_bitshift = 0;
 	return (pide);
+
+found_it:
+	ioc->res_hint = res_ptr;
+	return (pide);
 }
...
...
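To make the search strategy above easier to follow: requests are rounded to a power of two and scanned at matching alignments, so a hit never straddles an alignment boundary. A self-contained sketch of the single-word case (illustrative, simplified from the driver):

    #include <stdint.h>

    /* Find `want` contiguous free (zero) bits in one 64-bit resource word,
     * testing only offsets that are multiples of `align`.  Returns the bit
     * index or -1.  `want` and `align` are assumed to be powers of two. */
    static int find_aligned_run(uint64_t word, unsigned want, unsigned align)
    {
            uint64_t mask = (want == 64) ? ~0ULL : ((1ULL << want) - 1);
            unsigned bit;

            for (bit = 0; bit + want <= 64; bit += align)
                    if ((word & (mask << bit)) == 0)
                            return bit;     /* run is free and aligned */
            return -1;
    }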
@@ -531,26 +578,67 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
-	unsigned long itc_start = ia64_get_itc();
+	unsigned long itc_start;
 #endif
 	unsigned long pide;
-	unsigned long flags;

 	ASSERT(pages_needed);
-	ASSERT(pages_needed <= BITS_PER_LONG);
 	ASSERT(0 == (size & ~iovp_mask));

-	spin_lock_irqsave(&ioc->res_lock, flags);
-
+#ifdef PDIR_SEARCH_TIMING
+	itc_start = ia64_get_itc();
+#endif
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
 	pide = sba_search_bitmap(ioc, pages_needed);
-	if (pide >= (ioc->res_size << 3)) {
+	if (unlikely(pide >= (ioc->res_size << 3))) {
 		pide = sba_search_bitmap(ioc, pages_needed);
-		if (pide >= (ioc->res_size << 3))
+		if (unlikely(pide >= (ioc->res_size << 3))) {
+#if DELAYED_RESOURCE_CNT > 0
+			/*
+			** With delayed resource freeing, we can give this one more shot.  We're
+			** getting close to being in trouble here, so do what we can to make this
+			** one count.
+			*/
+			spin_lock(&ioc->saved_lock);
+			if (ioc->saved_cnt > 0) {
+				struct sba_dma_pair *d;
+				int cnt = ioc->saved_cnt;
+
+				d = &(ioc->saved[ioc->saved_cnt]);
+
+				while (cnt--) {
+					sba_mark_invalid(ioc, d->iova, d->size);
+					sba_free_range(ioc, d->iova, d->size);
+					d--;
+				}
+				ioc->saved_cnt = 0;
+				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+			}
+			spin_unlock(&ioc->saved_lock);
+
+			pide = sba_search_bitmap(ioc, pages_needed);
+			if (unlikely(pide >= (ioc->res_size << 3)))
+				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
+				      ioc->ioc_hpa);
+#else
 			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 			      ioc->ioc_hpa);
+#endif
+		}
 	}

+#ifdef PDIR_SEARCH_TIMING
+	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
+	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
+#endif
+
+	prefetchw(&(ioc->pdir_base[pide]));
+
 #ifdef ASSERT_PDIR_SANITY
 	/* verify the first enable bit is clear */
 	if (0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
...
...
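The DELAYED_RESOURCE_CNT path above works because unmapped ranges are only parked, not returned: when the bitmap search fails, the allocator can drain the parking array, issue one batched IOTLB purge, and retry. A schematic of the drain step (simplified types; the callback names are hypothetical stand-ins for the driver's helpers):

    struct dma_pair { unsigned long iova; unsigned long size; };

    /* Drain the deferred-free list: invalidate and free each parked range,
     * newest first, so a single flushing register read afterwards covers
     * all purges. */
    static void drain_saved(struct dma_pair *saved, int *saved_cnt,
                            void (*mark_invalid)(unsigned long, unsigned long),
                            void (*free_range)(unsigned long, unsigned long))
    {
            int cnt = *saved_cnt;
            struct dma_pair *d = &saved[cnt - 1];

            while (cnt--) {
                    mark_invalid(d->iova, d->size);
                    free_range(d->iova, d->size);
                    d--;
            }
            *saved_cnt = 0;
    }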
@@ -563,10 +651,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );

-#ifdef PDIR_SEARCH_TIMING
-	ioc->avg_search[ioc->avg_idx++] = ia64_get_itc() - itc_start;
-	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
-#endif
-
-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
 	return (pide);
 }
...
...
@@ -587,22 +672,33 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	unsigned int pide = PDIR_INDEX(iovp);
 	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
 	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
-
 	int bits_not_wanted = size >> iovp_shift;
+	unsigned long m;

-	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
-	unsigned long m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
+	for (; bits_not_wanted > 0 ; res_ptr++) {

-	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
-		bits_not_wanted, m, pide, res_ptr, *res_ptr);
+		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

-	ASSERT(m != 0);
-	ASSERT(bits_not_wanted);
-	ASSERT((bits_not_wanted * iovp_size) <= DMA_CHUNK_SIZE);
-	ASSERT(bits_not_wanted <= BITS_PER_LONG);
-	ASSERT((*res_ptr & m) == m); /* verify same bits are set */
-	*res_ptr &= ~m;
+			/* these mappings start 64bit aligned */
+			*res_ptr = 0UL;
+			bits_not_wanted -= BITS_PER_LONG;
+			pide += BITS_PER_LONG;
+
+		} else {
+
+			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
+			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
+			bits_not_wanted = 0;
+
+			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
+				bits_not_wanted, m, pide, res_ptr, *res_ptr);
+
+			ASSERT(m != 0);
+			ASSERT(bits_not_wanted);
+			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
+			*res_ptr &= ~m;
+		}
+	}
 }
...
...
@@ -612,9 +708,6 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 *
 ***************************************************************/

-#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
-
 /**
  * sba_io_pdir_entry - fill in one IO PDIR entry
  * @pdir_ptr:	 pointer to IO PDIR entry
...
...
@@ -764,32 +857,36 @@ dma_addr_t
 sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 {
 	struct ioc *ioc;
-	unsigned long flags;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
 	int pide;
+#ifdef ASSERT_PDIR_SANITY
+	unsigned long flags;
+#endif
 #ifdef ALLOW_IOV_BYPASS
 	unsigned long pci_addr = virt_to_phys(addr);
 #endif

-	ioc = GET_IOC(dev);
-	ASSERT(ioc);
-
 #ifdef ALLOW_IOV_BYPASS
+	ASSERT(to_pci_dev(dev)->dma_mask);
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
 	*/
-	if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0) {
+	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
 		/*
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
 		*/
 		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-		           *dev->dma_mask, pci_addr);
+		           to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
 #endif
+	ioc = GET_IOC(dev);
+	ASSERT(ioc);
+
+	prefetch(ioc->res_hint);
+
 	ASSERT(size > 0);
 	ASSERT(size <= DMA_CHUNK_SIZE);
...
...
@@ -800,13 +897,15 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* round up to nearest iovp_size */
 	size = (size + offset + ~iovp_mask) & iovp_mask;

-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef ASSERT_PDIR_SANITY
+	spin_lock_irqsave(&ioc->res_lock, flags);
 	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
 		panic("Sanity check failed");
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif

 	pide = sba_alloc_range(ioc, size);
 	iovp = (dma_addr_t) pide << iovp_shift;

 	DBG_RUN("%s() 0x%p -> 0x%lx\n",
...
...
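The rounding expression in that hunk is ordinary mask arithmetic: with iovp_mask = ~(iovp_size - 1), the form (size + offset + ~iovp_mask) & iovp_mask rounds the transfer up to whole IOVA pages. A worked example (standalone sketch, assuming 4 KB pages):

    #include <stdio.h>

    int main(void)
    {
            unsigned long iovp_size = 4096;              /* assumed page size */
            unsigned long iovp_mask = ~(iovp_size - 1);
            unsigned long size = 5000, offset = 100;

            /* same expression as the driver uses */
            unsigned long rounded = (size + offset + ~iovp_mask) & iovp_mask;
            printf("%lu\n", rounded);                    /* 5100 -> 8192 */
            return 0;
    }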
@@ -829,10 +928,11 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
+	spin_lock_irqsave(&ioc->res_lock, flags);
 	sba_check_pdir(ioc,"Check after sba_map_single()");
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
-	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
+	return SBA_IOVA(ioc, iovp, offset);
 }
/**
...
...
@@ -857,7 +957,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	ASSERT(ioc);

 #ifdef ALLOW_IOV_BYPASS
-	if ((iova & ioc->imask) != ioc->ibase) {
+	if (likely((iova & ioc->imask) != ioc->ibase)) {
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
...
...
@@ -880,14 +980,15 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	size += offset;
 	size = ROUNDUP(size, iovp_size);

-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #if DELAYED_RESOURCE_CNT > 0
+	spin_lock_irqsave(&ioc->saved_lock, flags);
 	d = &(ioc->saved[ioc->saved_cnt]);
 	d->iova = iova;
 	d->size = size;
-	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
+	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
 		int cnt = ioc->saved_cnt;
+		spin_lock(&ioc->res_lock);
 		while (cnt--) {
 			sba_mark_invalid(ioc, d->iova, d->size);
 			sba_free_range(ioc, d->iova, d->size);
...
...
@@ -895,11 +996,15 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		}
 		ioc->saved_cnt = 0;
 		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+		spin_unlock(&ioc->res_lock);
 	}
+	spin_unlock_irqrestore(&ioc->saved_lock, flags);
 #else /* DELAYED_RESOURCE_CNT == 0 */
+	spin_lock_irqsave(&ioc->res_lock, flags);
 	sba_mark_invalid(ioc, iova, size);
 	sba_free_range(ioc, iova, size);
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
+
 #ifdef ENABLE_MARK_CLEAN
 	if (dir == DMA_FROM_DEVICE) {
...
...
@@ -925,16 +1030,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 		}
 	}
 #endif
-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
-	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
-	** For Astro based systems this isn't a big deal WRT performance.
-	** As long as 2.4 kernels copyin/copyout data from/to userspace,
-	** we don't need the syncdma. The issue here is I/O MMU cachelines
-	** are *not* coherent in all cases.  May be hwrev dependent.
-	** Need to investigate more.
-	asm volatile("syncdma");
-	*/
 }
...
...
@@ -953,18 +1048,33 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int
 	void *addr;

 	addr = (void *) __get_free_pages(flags, get_order(size));
-	if (!addr)
+	if (unlikely(!addr))
 		return NULL;

+	memset(addr, 0, size);
+	*dma_handle = virt_to_phys(addr);
+
+#ifdef ALLOW_IOV_BYPASS
+	ASSERT(to_pci_dev(dev)->consistent_dma_mask);
+	/*
+	** Check if the PCI device can DMA to ptr... if so, just return ptr
+	*/
+	if (likely((*dma_handle & ~to_pci_dev(dev)->consistent_dma_mask) == 0)) {
+		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
+		           to_pci_dev(dev)->consistent_dma_mask, *dma_handle);
+		return addr;
+	}
+#endif
+
 	/*
-	 * REVISIT: if sba_map_single starts needing more than dma_mask from the
-	 * device, this needs to be updated.
+	 * If device can't bypass or bypass is disabled, pass the 32bit fake
+	 * device to map single to get an iova mapping.
 	 */
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);
 	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);

-	memset(addr, 0, size);
 	return addr;
 }
...
...
@@ -1232,8 +1342,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
+#ifdef ASSERT_PDIR_SANITY
 	unsigned long flags;
-#ifdef ALLOW_IOV_BYPASS
+#endif
+#ifdef ALLOW_IOV_BYPASS_SG
 	struct scatterlist *sg;
 #endif
...
...
@@ -1241,8 +1353,9 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

-#ifdef ALLOW_IOV_BYPASS
-	if (dev && dev->dma_mask && (ioc->dma_mask & ~*dev->dma_mask) == 0) {
+#ifdef ALLOW_IOV_BYPASS_SG
+	ASSERT(to_pci_dev(dev)->dma_mask);
+	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
 		for (sg = sglist ; filled < nents ; filled++, sg++){
 			sg->dma_length = sg->length;
 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
...
...
@@ -1253,21 +1366,22 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist),
-		                                     sglist->length, dir);
+		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
 		return 1;
 	}

-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef ASSERT_PDIR_SANITY
+	spin_lock_irqsave(&ioc->res_lock, flags);
 	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
 		panic("Check before sba_map_sg()");
 	}
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif

+	prefetch(ioc->res_hint);
+
 	/*
 	** First coalesce the chunks and allocate I/O pdir space
 	**
...
...
@@ -1289,14 +1403,14 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	filled = sba_fill_pdir(ioc, sglist, nents);

 #ifdef ASSERT_PDIR_SANITY
+	spin_lock_irqsave(&ioc->res_lock, flags);
 	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
 		panic("Check after sba_map_sg()\n");
 	}
-#endif
-
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
+#endif

 	ASSERT(coalesced == filled);
 	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
...
...
@@ -1316,18 +1430,18 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 */
 void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
 {
-	struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
+	struct ioc *ioc;
 	unsigned long flags;
 #endif

 	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
 		   __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);

+#ifdef ASSERT_PDIR_SANITY
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

-#ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
 	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
...
...
@@ -1478,6 +1592,9 @@ static void __init
 ioc_resource_init(struct ioc *ioc)
 {
 	spin_lock_init(&ioc->res_lock);
+#if DELAYED_RESOURCE_CNT > 0
+	spin_lock_init(&ioc->saved_lock);
+#endif

 	/* resource map size dictated by pdir_size */
 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE;  /* entries */
...
...
@@ -1689,13 +1806,13 @@ ioc_show(struct seq_file *s, void *v)
 	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
 		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
-	seq_printf(s, "IOVA size       : %d MB\n", ioc->iov_size/(1024*1024));
+	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
 	seq_printf(s, "IOVA page size  : %ld kb\n", iovp_size/1024);

 	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
 		used += hweight64(*res_ptr);

-	seq_printf(s, "PDIR size       : %d entries\n", ioc->res_size << 3);
+	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
 	seq_printf(s, "PDIR used       : %d entries\n", used);

 #ifdef PDIR_SEARCH_TIMING
...
...
@@ -1708,7 +1825,7 @@ ioc_show(struct seq_file *s, void *v)
 		if (ioc->avg_search[i] < min)
 			min = ioc->avg_search[i];
 	}
 	avg /= SBA_SEARCH_SAMPLE;
-	seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
+	seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
 		   min, avg, max);
 }
 #endif
...
...
arch/ia64/kernel/efivars.c
...
@@ -29,6 +29,9 @@
*
* Changelog:
*
* 10 Feb 2004 - Stephane Eranian <eranian@hpl.hp.com>
* Provide FPSWA version number via /proc/efi/fpswa
*
* 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
* fix locking per Peter Chubb's findings
*
...
...
@@ -70,6 +73,7 @@
 #include <linux/smp.h>
 #include <linux/efi.h>

+#include <asm/fpswa.h>
 #include <asm/uaccess.h>

 MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
...
...
@@ -407,6 +411,37 @@ static struct file_operations efi_systab_fops = {
 	.read = efi_systab_read,
 };

+static ssize_t
+efi_fpswa_read (struct file *file, char *buffer, size_t count, loff_t *ppos)
+{
+	ssize_t size, length;
+	char str[32];
+	void *data;
+
+	snprintf(str, sizeof(str), "revision=%u.%u\n",
+		 fpswa_interface->revision >> 16, fpswa_interface->revision & 0xffff);
+
+	length = strlen(str);
+
+	if (*ppos >= length)
+		return 0;
+
+	data = str + file->f_pos;
+	size = length - file->f_pos;
+	if (size > count)
+		size = count;
+	if (copy_to_user(buffer, data, size))
+		return -EFAULT;
+	*ppos += size;
+	return size;
+}
+
+static struct proc_dir_entry *efi_fpswa_entry;
+static struct file_operations efi_fpswa_fops = {
+	.read = efi_fpswa_read,
+};
+
 static int __init
 efivars_init(void)
 {
...
...
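Once this patch is applied, the FPSWA revision is a plain text file under /proc. A trivial userspace consumer, for illustration only (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/proc/efi/fpswa", "r");

            if (!f)
                    return 1;                 /* no FPSWA interface present */
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout);       /* e.g. "revision=1.18" */
            fclose(f);
            return 0;
    }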
@@ -429,6 +464,12 @@ efivars_init(void)
 	if (efi_systab_entry)
 		efi_systab_entry->proc_fops = &efi_systab_fops;

+	if (fpswa_interface) {
+		efi_fpswa_entry = create_proc_entry("fpswa", S_IRUGO, efi_dir);
+		if (efi_fpswa_entry)
+			efi_fpswa_entry->proc_fops = &efi_fpswa_fops;
+	}
+
 	efi_vars_dir = proc_mkdir("vars", efi_dir);

 	/* Per EFI spec, the maximum storage allocated for both
...
...
arch/ia64/kernel/mca.c
...
@@ -18,7 +18,7 @@
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
-* Copyright (C) 1999 Silicon Graphics, Inc.
+* Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger Added INIT backtrace support.
...
...
@@ -40,6 +40,14 @@
* 2003-12-08 Keith Owens <kaos@sgi.com>
* smp_call_function() must not be called from interrupt context (can
* deadlock on tasklist_lock). Use keventd to call smp_call_function().
*
* 2004-02-01 Keith Owens <kaos@sgi.com>
* Avoid deadlock when using printk() for MCA and INIT records.
* Delete all record printing code, moved to salinfo_decode in user space.
* Mark variables and functions static where possible.
* Delete dead variables and functions.
* Reorder to remove the need for forward declarations and to consolidate
* related code.
*/
#include <linux/config.h>
#include <linux/types.h>
...
...
@@ -68,14 +76,18 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>

-#undef MCA_PRT_XTRA_DATA
-
 #if defined(IA64_MCA_DEBUG_INFO)
 # define IA64_MCA_DEBUG(fmt...)	printk(fmt)
 #else
 # define IA64_MCA_DEBUG(fmt...)
 #endif

+typedef struct ia64_fptr {
+	unsigned long fp;
+	unsigned long gp;
+} ia64_fptr_t;
+
-ia64_mc_info_t		ia64_mc_info;
+/* Used by mca_asm.S */
 ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
 ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
 u64				ia64_mca_proc_state_dump[512];
...
...
@@ -83,56 +95,17 @@ u64 ia64_mca_stack[1024] __attribute__((aligned(16)));
 u64				ia64_mca_stackframe[32];
 u64				ia64_mca_bspstore[1024];
 u64				ia64_init_stack[KERNEL_STACK_SIZE/8] __attribute__((aligned(16)));
-u64				ia64_os_mca_recovery_successful;
 u64				ia64_mca_serialize;

-static void			ia64_mca_wakeup_ipi_wait(void);
-static void			ia64_mca_wakeup(int cpu);
-static void			ia64_mca_wakeup_all(void);
-static void			ia64_log_init(int);
-
 /* In mca_asm.S */
 extern void			ia64_monarch_init_handler (void);
 extern void			ia64_slave_init_handler (void);

-static u64			ia64_log_get(int sal_info_type, u8 **buffer);
-
-extern struct hw_interrupt_type	irq_type_iosapic_level;
-
-struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];
-
-static struct irqaction cmci_irqaction = {
-	.handler =	ia64_mca_cmc_int_handler,
-	.flags =	SA_INTERRUPT,
-	.name =		"cmc_hndlr"
-};
-
-static struct irqaction cmcp_irqaction = {
-	.handler =	ia64_mca_cmc_int_caller,
-	.flags =	SA_INTERRUPT,
-	.name =		"cmc_poll"
-};
-
-static struct irqaction mca_rdzv_irqaction = {
-	.handler =	ia64_mca_rendez_int_handler,
-	.flags =	SA_INTERRUPT,
-	.name =		"mca_rdzv"
-};
-
-static struct irqaction mca_wkup_irqaction = {
-	.handler =	ia64_mca_wakeup_int_handler,
-	.flags =	SA_INTERRUPT,
-	.name =		"mca_wkup"
-};
+static ia64_mc_info_t		ia64_mc_info;

 #ifdef CONFIG_ACPI
-static struct irqaction mca_cpe_irqaction = {
-	.handler =	ia64_mca_cpe_int_handler,
-	.flags =	SA_INTERRUPT,
-	.name =		"cpe_hndlr"
-};
-
-static struct irqaction mca_cpep_irqaction = {
-	.handler =	ia64_mca_cpe_int_caller,
-	.flags =	SA_INTERRUPT,
-	.name =		"cpe_poll"
-};
+extern struct hw_interrupt_type	irq_type_iosapic_level;
 #endif /* CONFIG_ACPI */

+struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];
+
 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
 #define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
...
...
@@ -156,59 +129,152 @@ static int cmc_polling_enabled = 1;
 */
 static int cpe_poll_enabled = 1;

-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size);
+extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

+/*
+ * IA64_MCA log support
+ */
+#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
+#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */
+
+typedef struct ia64_state_log_s
+{
+	spinlock_t	isl_lock;
+	int		isl_index;
+	unsigned long	isl_count;
+	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
+} ia64_state_log_t;
+
+static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
+
+#define IA64_LOG_ALLOCATE(it, size) \
+	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+		(ia64_err_rec_t *)alloc_bootmem(size); \
+	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+		(ia64_err_rec_t *)alloc_bootmem(size);}
+#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
+#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
+#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
+#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
+#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
+#define IA64_LOG_INDEX_INC(it) \
+    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
+    ia64_state_log[it].isl_count++;}
+#define IA64_LOG_INDEX_DEC(it) \
+    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
+#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
+#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
+#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
+
+/*
+ * ia64_log_init
+ *	Reset the OS ia64 log buffer
+ * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ * Outputs	:	None
+ */
+static void
+ia64_log_init(int sal_info_type)
+{
+	u64	max_size = 0;
+
+	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
+	IA64_LOG_LOCK_INIT(sal_info_type);
+
+	// SAL will tell us the maximum size of any error record of this type
+	max_size = ia64_sal_get_state_info_size(sal_info_type);
+	if (!max_size)
+		/* alloc_bootmem() doesn't like zero-sized allocations! */
+		return;
+
+	// set up OS data structures to hold error info
+	IA64_LOG_ALLOCATE(sal_info_type, max_size);
+	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
+	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+}
+
+/*
+ * ia64_log_get
+ *
+ *	Get the current MCA log from SAL and copy it into the OS log buffer.
+ *
+ *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ *              irq_safe    whether you can use printk at this point
+ *  Outputs :   size        (total record length)
+ *              *buffer     (ptr to error record)
+ *
+ */
+static u64
+ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
+{
+	sal_log_record_header_t     *log_buffer;
+	u64                         total_len = 0;
+	int                         s;
+
+	IA64_LOG_LOCK(sal_info_type);
+
+	/* Get the process state information */
+	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
+
+	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
+
+	if (total_len) {
+		IA64_LOG_INDEX_INC(sal_info_type);
+		IA64_LOG_UNLOCK(sal_info_type);
+		if (irq_safe) {
+			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
+				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+		}
+		*buffer = (u8 *) log_buffer;
+		return total_len;
+	} else {
+		IA64_LOG_UNLOCK(sal_info_type);
+		return 0;
+	}
+}
+
 /*
  *  ia64_mca_log_sal_error_record
  *
- *  This function retrieves a specified error record type from SAL,
- *  wakes up any processes waiting for error records, and sends it to
- *  the system log.
+ *  This function retrieves a specified error record type from SAL
+ *  and wakes up any processes waiting for error records.
  *
  *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
- *  Outputs :   platform error status
+ *              called_from_init (1 for boot processing)
  */
-int
-ia64_mca_log_sal_error_record(int sal_info_type)
+static void
+ia64_mca_log_sal_error_record(int sal_info_type, int called_from_init)
 {
 	u8 *buffer;
 	u64 size;
-	int platform_err;
+	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
+	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

-	size = ia64_log_get(sal_info_type, &buffer);
+	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
 	if (!size)
-		return 0;
+		return;

 	/* TODO:
 	 * 1. analyze error logs to determine recoverability
 	 * 2. perform error recovery procedures, if applicable
 	 * 3. set ia64_os_mca_recovery_successful flag, if applicable
 	 */
+	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
+	if (irq_safe || called_from_init)
+		printk(KERN_INFO "CPU %d: SAL log contains %s error record\n",
+			smp_processor_id(),
+			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

-	salinfo_log_wakeup(sal_info_type, buffer, size);
-	platform_err = ia64_log_print(sal_info_type, (prfunc_t)printk);
 	/* Clear logs from corrected errors in case there's no user-level logger */
 	if (sal_info_type == SAL_INFO_TYPE_CPE || sal_info_type == SAL_INFO_TYPE_CMC)
 		ia64_sal_clear_state_info(sal_info_type);
-
-	return platform_err;
 }

 /*
  * platform dependent error handling
  */
 #ifndef PLATFORM_MCA_HANDLERS
-void
-mca_handler_platform (void)
-{
-
-}

-irqreturn_t
+static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
-	IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. CPU:%d vector = %#x\n",
-		       smp_processor_id(), cpe_irq);
+	IA64_MCA_DEBUG("%s: received interrupt. CPU:%d vector = %#x\n",
+		       __FUNCTION__, smp_processor_id(), cpe_irq);

 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
...
...
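The IA64_LOG_* macros above implement a two-slot flip buffer: isl_index names the slot SAL fills next, 1 - isl_index is the record just retrieved, and flipping the index under the lock lets a nested MCA land in the other slot. The same idea in a compact standalone sketch (locking omitted for brevity):

    struct state_log {
            int           next;      /* slot SAL fills on the next retrieval */
            unsigned long count;     /* records retrieved so far */
            void         *slot[2];
    };

    static void *log_next_buffer(struct state_log *l) { return l->slot[l->next]; }
    static void *log_curr_buffer(struct state_log *l) { return l->slot[1 - l->next]; }

    static void log_index_inc(struct state_log *l)
    {
            l->next = 1 - l->next;   /* old "next" becomes "current" */
            l->count++;
    }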
@@ -356,7 +422,7 @@ fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_sta
 	PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
 }

-void
+static void
 init_handler_platform (pal_min_state_area_t *ms,
 		       struct pt_regs *pt, struct switch_stack *sw)
 {
...
...
@@ -403,23 +469,6 @@ init_handler_platform (pal_min_state_area_t *ms,
 	while (1);			/* hang city if no debugger */
 }

-/*
- * ia64_mca_init_platform
- *
- *  External entry for platform specific MCA initialization.
- *
- *  Inputs
- *      None
- *
- *  Outputs
- *      None
- */
-void
-ia64_mca_init_platform (void)
-{
-
-}
-
 /*
  * ia64_mca_check_errors
  *
...
...
@@ -438,6 +487,7 @@ ia64_mca_check_errors (void)
 	/*
 	 *  If there is an MCA error record pending, get it and log it.
 	 */
+	printk(KERN_INFO "CPU %d: checking for saved MCA error records\n", smp_processor_id());
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA, 1);

 	return 0;
...
...
@@ -465,13 +515,13 @@ ia64_mca_register_cpev (int cpev)
 	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
 	if (isrv.status) {
-		printk(KERN_ERR "ia64_mca_platform_init: failed to register Corrected "
-		       "Platform Error interrupt vector with SAL.\n");
+		printk(KERN_ERR "Failed to register Corrected Platform "
+		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
 		return;
 	}

-	IA64_MCA_DEBUG("ia64_mca_platform_init: corrected platform error "
-		       "vector %#x setup and enabled\n", cpev);
+	IA64_MCA_DEBUG("%s: corrected platform error "
+		       "vector %#x setup and enabled\n", __FUNCTION__, cpev);
 }
 #endif /* CONFIG_ACPI */
...
...
@@ -499,12 +549,12 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

-	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d corrected "
+	IA64_MCA_DEBUG("%s: CPU %d corrected "
 		       "machine check vector %#x setup and enabled.\n",
-		       smp_processor_id(), IA64_CMC_VECTOR);
+		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

-	IA64_MCA_DEBUG("ia64_mca_platform_init: CPU %d CMCV = %#016lx\n",
-		       smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
+		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }

 /*
...
...
@@ -519,7 +569,7 @@ ia64_mca_cmc_vector_setup (void)
 *	Outputs
 *	    None
 */
-void
+static void
 ia64_mca_cmc_vector_disable (void *dummy)
 {
 	cmcv_reg_t	cmcv;
...
...
@@ -529,9 +579,9 @@ ia64_mca_cmc_vector_disable (void *dummy)
 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval)

-	IA64_MCA_DEBUG("ia64_mca_cmc_vector_disable: CPU %d corrected "
+	IA64_MCA_DEBUG("%s: CPU %d corrected "
 		       "machine check vector %#x disabled.\n",
-		       smp_processor_id(), cmcv.cmcv_vector);
+		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
 }

 /*
...
...
@@ -546,7 +596,7 @@ ia64_mca_cmc_vector_disable (void *dummy)
 *	Outputs
 *	    None
 */
-void
+static void
 ia64_mca_cmc_vector_enable (void *dummy)
 {
 	cmcv_reg_t	cmcv;
...
...
@@ -556,63 +606,9 @@ ia64_mca_cmc_vector_enable (void *dummy)
 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval)

-	IA64_MCA_DEBUG("ia64_mca_cmc_vector_enable: CPU %d corrected "
+	IA64_MCA_DEBUG("%s: CPU %d corrected "
 		       "machine check vector %#x enabled.\n",
-		       smp_processor_id(), cmcv.cmcv_vector);
-}
-
-#if defined(MCA_TEST)
-
-sal_log_processor_info_t	slpi_buf;
-
-void
-mca_test(void)
-{
-	slpi_buf.valid.psi_static_struct = 1;
-	slpi_buf.valid.num_cache_check = 1;
-	slpi_buf.valid.num_tlb_check = 1;
-	slpi_buf.valid.num_bus_check = 1;
-	slpi_buf.valid.processor_static_info.minstate = 1;
-	slpi_buf.valid.processor_static_info.br = 1;
-	slpi_buf.valid.processor_static_info.cr = 1;
-	slpi_buf.valid.processor_static_info.ar = 1;
-	slpi_buf.valid.processor_static_info.rr = 1;
-	slpi_buf.valid.processor_static_info.fr = 1;
-
-	ia64_os_mca_dispatch();
-}
-
-#endif /* #if defined(MCA_TEST) */
-
-/*
- * verify_guid
- *
- *	Compares a test guid to a target guid and returns result.
- *
- *	Inputs
- *		test_guid *	(ptr to guid to be verified)
- *		target_guid *	(ptr to standard guid to be verified against)
- *
- *	Outputs
- *		0		(test verifies against target)
- *		non-zero	(test guid does not verify)
- */
-static int
-verify_guid (efi_guid_t *test, efi_guid_t *target)
-{
-	int rc;
-#ifdef IA64_MCA_DEBUG_INFO
-	char out[40];
-#endif
-
-	if ((rc = efi_guidcmp(*test, *target))) {
-		IA64_MCA_DEBUG(KERN_DEBUG
-			       "verify_guid: invalid GUID = %s\n",
-			       efi_guid_unparse(test, out));
-	}
-	return rc;
+		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
 }

 /*
...
...
@@ -640,252 +636,67 @@ ia64_mca_cmc_vector_enable_keventd(void *unused)
 }

 /*
- * ia64_mca_init
+ * ia64_mca_wakeup_ipi_wait
 *
- *  Do all the system level mca specific initialization.
- *
- *	1. Register spinloop and wakeup request interrupt vectors
+ *	Wait for the inter-cpu interrupt to be sent by the
+ *	monarch processor once it is done with handling the
+ *	MCA.
 *
- *	2. Register OS_MCA handler entry point
- *
- *	3. Register OS_INIT handler entry point
- *
- *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
- *
- *  Note that this initialization is done very early before some kernel
- *  services are available.
- *
- *  Inputs  :   None
- *
- *  Outputs :   None
+ *	Inputs  :   None
+ *	Outputs :   None
 */
-void __init
-ia64_mca_init(void)
+static void
+ia64_mca_wakeup_ipi_wait(void)
 {
-	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
-	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
-	int i;
-	s64 rc;
-	struct ia64_sal_retval isrv;
-	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
-
-	IA64_MCA_DEBUG("ia64_mca_init: begin\n");
-
-	/* initialize recovery success indicator */
-	ia64_os_mca_recovery_successful = 0;
-
-	/* Clear the Rendez checkin flag for all cpus */
-	for(i = 0 ; i < NR_CPUS; i++)
-		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
-	/*
-	 * Register the rendezvous spinloop and wakeup mechanism with SAL
-	 */
-
-	/* Register the rendezvous interrupt vector with SAL */
-	while (1) {
-		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
-					      SAL_MC_PARAM_MECHANISM_INT,
-					      IA64_MCA_RENDEZ_VECTOR,
-					      timeout,
-					      SAL_MC_PARAM_RZ_ALWAYS);
-		rc = isrv.status;
-		if (rc == 0)
-			break;
-		if (rc == -2) {
-			printk(KERN_INFO "ia64_mca_init: increasing MCA rendezvous timeout from "
-				"%ld to %ld\n", timeout, isrv.v0);
-			timeout = isrv.v0;
-			continue;
-		}
-		printk(KERN_ERR "ia64_mca_init: Failed to register rendezvous interrupt "
-		       "with SAL. rc = %ld\n", rc);
-		return;
-	}
-
-	/* Register the wakeup interrupt vector with SAL */
-	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
-				      SAL_MC_PARAM_MECHANISM_INT,
-				      IA64_MCA_WAKEUP_VECTOR,
-				      0, 0);
-	rc = isrv.status;
-	if (rc) {
-		printk(KERN_ERR "ia64_mca_init: Failed to register wakeup interrupt with SAL. "
-		       "rc = %ld\n", rc);
-		return;
-	}
-
-	IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n");
-
-	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
-	/*
-	 * XXX - disable SAL checksum by setting size to 0; should be
-	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
-	 */
-	ia64_mc_info.imi_mca_handler_size = 0;
-
-	/* Register the os mca handler with SAL */
-	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
-				       ia64_mc_info.imi_mca_handler,
-				       ia64_tpa(mca_hldlr_ptr->gp),
-				       ia64_mc_info.imi_mca_handler_size,
-				       0, 0, 0)))
-	{
-		printk(KERN_ERR "ia64_mca_init: Failed to register os mca handler with SAL. "
-		       "rc = %ld\n", rc);
-		return;
-	}
-
-	IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n",
-		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
-
-	/*
-	 * XXX - disable SAL checksum by setting size to 0, should be
-	 * IA64_INIT_HANDLER_SIZE
-	 */
-	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
-	ia64_mc_info.imi_monarch_init_handler_size	= 0;
-	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
-	ia64_mc_info.imi_slave_init_handler_size	= 0;
-
-	IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n",
-		       ia64_mc_info.imi_monarch_init_handler);
-
-	/* Register the os init handler with SAL */
-	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
-				       ia64_mc_info.imi_monarch_init_handler,
-				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
-				       ia64_mc_info.imi_monarch_init_handler_size,
-				       ia64_mc_info.imi_slave_init_handler,
-				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
-				       ia64_mc_info.imi_slave_init_handler_size)))
-	{
-		printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
-		       "rc = %ld\n", rc);
-		return;
-	}
-
-	IA64_MCA_DEBUG("ia64_mca_init: registered os init handler with SAL\n");
-
-	/*
-	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
-	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
-	 */
-	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
-	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
-	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP & enable */
-
-	/* Setup the MCA rendezvous interrupt vector */
-	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
-
-	/* Setup the MCA wakeup interrupt vector */
-	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
-
-#ifdef CONFIG_ACPI
-	/* Setup the CPE interrupt vector */
-	{
-		irq_desc_t *desc;
-		unsigned int irq;
-		int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-
-		if (cpev >= 0) {
-			for (irq = 0; irq < NR_IRQS; ++irq)
-				if (irq_to_vector(irq) == cpev) {
-					desc = irq_descp(irq);
-					desc->status |= IRQ_PER_CPU;
-					desc->handler = &irq_type_iosapic_level;
-					setup_irq(irq, &mca_cpe_irqaction);
-				}
-			ia64_mca_register_cpev(cpev);
-		}
-	}
-#endif
-
-	/* Initialize the areas set aside by the OS to buffer the
-	 * platform/processor error states for MCA/INIT/CMC
-	 * handling.
-	 */
-	ia64_log_init(SAL_INFO_TYPE_MCA);
-	ia64_log_init(SAL_INFO_TYPE_INIT);
-	ia64_log_init(SAL_INFO_TYPE_CMC);
-	ia64_log_init(SAL_INFO_TYPE_CPE);
-
-#if defined(MCA_TEST)
-	mca_test();
-#endif /* #if defined(MCA_TEST) */
-
-	printk(KERN_INFO "Mca related initialization done\n");
-
-/* commented out because this is done elsewhere */
-#if 0
-	/* Do post-failure MCA error logging */
-	ia64_mca_check_errors();
-#endif
-}
-
-/*
- * ia64_mca_wakeup_ipi_wait
- *
- *	Wait for the inter-cpu interrupt to be sent by the
- *	monarch processor once it is done with handling the
- *	MCA.
- *
- *	Inputs  :   None
- *	Outputs :   None
- */
-void
-ia64_mca_wakeup_ipi_wait(void)
-{
 	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
 	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
 	u64	irr = 0;

 	do {
 		switch(irr_num) {
 		      case 0:
 			irr = ia64_getreg(_IA64_REG_CR_IRR0);
 			break;
 		      case 1:
 			irr = ia64_getreg(_IA64_REG_CR_IRR1);
 			break;
 		      case 2:
 			irr = ia64_getreg(_IA64_REG_CR_IRR2);
 			break;
 		      case 3:
 			irr = ia64_getreg(_IA64_REG_CR_IRR3);
 			break;
 		}
 	} while (!(irr & (1UL << irr_bit))) ;
 }

 /*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake-up a particular cpu
 *	and mark that cpu to be out of rendez.
 *
 *	Inputs  :   cpuid
 *	Outputs :   None
 */
-void
+static void
 ia64_mca_wakeup(int cpu)
 {
 	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 }

 /*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the cpus which have rendez'ed previously.
 *
 *	Inputs  :   None
 *	Outputs :   None
 */
-void
-ia64_mca_wakeup_all(void)
+static void
+ia64_mca_wakeup_all(void)
 {
 	int cpu;
...
...
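The wakeup wait loop above decodes an interrupt vector against the four 64-bit IRR registers: vector >> 6 picks the register and vector & 0x3f the bit within it. Schematically (a sketch, with the registers assumed already read into an array):

    #include <stdint.h>

    /* Is `vector` (0..255) pending, given irr[0..3] holding CR.IRR0-3? */
    static int vector_pending(const uint64_t irr[4], unsigned vector)
    {
            unsigned reg = vector >> 6;      /* 64 vectors per register */
            unsigned bit = vector & 0x3f;    /* position within the register */

            return (irr[reg] >> bit) & 1;
    }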
@@ -909,7 +720,7 @@ ia64_mca_wakeup_all(void)
 *	Inputs  :   None
 *	Outputs :   None
 */
-irqreturn_t
+static irqreturn_t
 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 {
 	unsigned long flags;
...
...
@@ -935,7 +746,6 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 	return IRQ_HANDLED;
 }

-
 /*
 * ia64_mca_wakeup_int_handler
 *
...
...
@@ -951,7 +761,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 *	Outputs :   None
 *
 */
-irqreturn_t
+static irqreturn_t
 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
 {
 	return IRQ_HANDLED;
...
@@ -971,11 +781,9 @@ ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
* Outputs : None
*/
void
ia64_return_to_sal_check
(
void
)
static
void
ia64_return_to_sal_check
(
int
recover
)
{
pal_processor_state_info_t
*
psp
=
(
pal_processor_state_info_t
*
)
&
ia64_sal_to_os_handoff_state
.
proc_state_param
;
/* Copy over some relevant stuff from the sal_to_os_mca_handoff
* so that it can be used at the time of os_mca_to_sal_handoff
...
...
@@ -986,15 +794,10 @@ ia64_return_to_sal_check(void)
 	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
 		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;

-	/*
-	 * Did we correct the error? At the moment the only error that
-	 * we fix is a TLB error, if any other kind of error occurred
-	 * we must reboot.
-	 */
-	if (psp->cc == 1 && psp->bc == 1 && psp->rc == 1 && psp->uc == 1)
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
-	else
+	if (recover)
 		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
+	else
+		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;

 	/* Default = tell SAL to return to same context */
 	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
...
...
@@ -1023,16 +826,12 @@ ia64_return_to_sal_check(void)
 void
 ia64_mca_ucmc_handler(void)
 {
-	int platform_err = 0;
+	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
+		&ia64_sal_to_os_handoff_state.proc_state_param;
+	int recover = psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc);

 	/* Get the MCA error record and log it */
-	platform_err = ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA, 0);
-
-	/*
-	 *  Do Platform-specific mca error handling if required.
-	 */
-	if (platform_err)
-		mca_handler_platform();
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA, 0);

 	/*
 	 * Wakeup all the processors which are spinning in the rendezvous
...
...
@@ -1041,7 +840,7 @@ ia64_mca_ucmc_handler(void)
 	ia64_mca_wakeup_all();

 	/* Return to SAL */
-	ia64_return_to_sal_check();
+	ia64_return_to_sal_check(recover);
 }

 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
...
...
@@ -1062,15 +861,15 @@ static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
 *	Outputs
 *	    None
 */
-irqreturn_t
+static irqreturn_t
 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 {
 	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
 	static int		index;
 	static spinlock_t	cmc_history_lock = SPIN_LOCK_UNLOCKED;

-	IA64_MCA_DEBUG("ia64_mca_cmc_int_handler: received interrupt vector = %#x on CPU %d\n",
-		       cmc_irq, smp_processor_id());
+	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
+		       __FUNCTION__, cmc_irq, smp_processor_id());

 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
...
...
@@ -1100,7 +899,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			 * make sure there's a log somewhere that indicates
 			 * something is generating more than we can handle.
 			 */
-			printk(KERN_WARNING "%s: WARNING: Switching to polling CMC handler, error records may be lost\n", __FUNCTION__);
+			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
...
...
@@ -1116,41 +915,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 	return IRQ_HANDLED;
 }

-/*
- * IA64_MCA log support
- */
-#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
-#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */
-
-typedef struct ia64_state_log_s
-{
-	spinlock_t	isl_lock;
-	int		isl_index;
-	unsigned long	isl_count;
-	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
-} ia64_state_log_t;
-
-static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-
-#define IA64_LOG_ALLOCATE(it, size) \
-	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
-		(ia64_err_rec_t *)alloc_bootmem(size); \
-	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
-		(ia64_err_rec_t *)alloc_bootmem(size);}
-#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
-#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
-#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
-#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
-#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
-#define IA64_LOG_INDEX_INC(it) \
-    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
-    ia64_state_log[it].isl_count++;}
-#define IA64_LOG_INDEX_DEC(it) \
-    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
-#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
-#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
-#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
-
 /*
 * ia64_mca_cmc_int_caller
 *
...
...
@@ -1165,7 +929,7 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 *	Outputs
 *	    handled
 */
-irqreturn_t
+static irqreturn_t
 ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
 	static int start_count = -1;
...
...
@@ -1184,10 +948,10 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 	if (cpuid < NR_CPUS) {
 		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 	} else {
-		/* If no log recored, switch out of polling mode */
+		/* If no log record, switch out of polling mode */
 		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
-			printk(KERN_WARNING "%s: Returning to interrupt driven CMC handler\n", __FUNCTION__);
+			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
 			schedule_work(&cmc_enable_work);
 			cmc_polling_enabled = 0;
...
...
@@ -1232,7 +996,7 @@ ia64_mca_cmc_poll (unsigned long dummy)
 *	Outputs
 *	    handled
 */
-irqreturn_t
+static irqreturn_t
 ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
 	static int start_count = -1;
...
...
@@ -1286,44 +1050,9 @@ ia64_mca_cpe_poll (unsigned long dummy)
 }

-/*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init.  Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs  : None
- * Outputs : Status
- */
-static int __init
-ia64_mca_late_init(void)
-{
-	init_timer(&cmc_poll_timer);
-	cmc_poll_timer.function = ia64_mca_cmc_poll;
-
-	/* Reset to the correct state */
-	cmc_polling_enabled = 0;
-
-	init_timer(&cpe_poll_timer);
-	cpe_poll_timer.function = ia64_mca_cpe_poll;
-
-#ifdef CONFIG_ACPI
-	/* If platform doesn't support CPEI, get the timer going. */
-	if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0 && cpe_poll_enabled) {
-		register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-		ia64_mca_cpe_poll(0UL);
-	}
-#endif
-
-	return 0;
-}
-
-device_initcall(ia64_mca_late_init);
-
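The function is not deleted, only moved: ia64_mca_late_init reappears at the end of this file (see the tail of this hunk). The split-initialization pattern it implements is the point: ia64_mca_init runs far too early for timers, so timer setup is deferred to a device_initcall. A minimal sketch of the pattern, with hypothetical names:

	static struct timer_list my_poll_timer;		/* hypothetical */
	static void my_poll(unsigned long data);	/* hypothetical poll routine */

	static int __init my_late_init(void)
	{
		init_timer(&my_poll_timer);	/* timer infrastructure is up by now */
		my_poll_timer.function = my_poll;
		return 0;
	}
	device_initcall(my_late_init);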
 /*
 * C portion of the OS INIT handler
 *
 * Called from ia64_monarch_init_handler
 *
 * Inputs: pointer to pt_regs where processor info was saved.
 *
...
...
@@ -1337,6 +1066,8 @@ ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
 {
	pal_min_state_area_t *ms;

+	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
+
	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
		ia64_sal_to_os_handoff_state.proc_state_param);
...
@@ -1350,1083 +1081,256 @@ ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
 	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
 }

-/*
- * ia64_log_prt_guid
- *
- *	Print a formatted GUID.
- *
- * Inputs   :   p_guid      (ptr to the GUID)
- *              prfunc      (print function)
- * Outputs  :   None
- *
- */
-void
-ia64_log_prt_guid (efi_guid_t *p_guid, prfunc_t prfunc)
-{
-	char out[40];
-	printk(KERN_DEBUG "GUID = %s\n", efi_guid_unparse(p_guid, out));
-}
-
-static void
-ia64_log_hexdump(unsigned char *p, unsigned long n_ch, prfunc_t prfunc)
-{
-	unsigned long i;
-	int j;
-
-	if (!p)
-		return;
-
-	for (i = 0; i < n_ch;) {
-		prfunc("%p ", (void *)p);
-		for (j = 0; (j < 16) && (i < n_ch); i++, j++, p++) {
-			prfunc("%02x ", *p);
-		}
-		prfunc("\n");
-	}
-}
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-
-static void
-ia64_log_prt_record_header (sal_log_record_header_t *rh, prfunc_t prfunc)
-{
-	prfunc("SAL RECORD HEADER:  Record buffer = %p,  header size = %ld\n",
-	       (void *)rh, sizeof(sal_log_record_header_t));
-	ia64_log_hexdump((unsigned char *)rh, sizeof(sal_log_record_header_t),
-			 (prfunc_t)prfunc);
-	prfunc("Total record length = %d\n", rh->len);
-	ia64_log_prt_guid(&rh->platform_guid, prfunc);
-	prfunc("End of SAL RECORD HEADER\n");
-}
-
-static void
-ia64_log_prt_section_header (sal_log_section_hdr_t *sh, prfunc_t prfunc)
-{
-	prfunc("SAL SECTION HEADER:  Record buffer = %p,  header size = %ld\n",
-	       (void *)sh, sizeof(sal_log_section_hdr_t));
-	ia64_log_hexdump((unsigned char *)sh, sizeof(sal_log_section_hdr_t),
-			 (prfunc_t)prfunc);
-	prfunc("Length of section & header = %d\n", sh->len);
-	ia64_log_prt_guid(&sh->guid, prfunc);
-	prfunc("End of SAL SECTION HEADER\n");
-}
-
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-/*
- * ia64_log_init
- *	Reset the OS ia64 log buffer
- * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs	:	None
- */
-void
-ia64_log_init(int sal_info_type)
-{
-	u64	max_size = 0;
-
-	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
-	IA64_LOG_LOCK_INIT(sal_info_type);
-
-	// SAL will tell us the maximum size of any error record of this type
-	max_size = ia64_sal_get_state_info_size(sal_info_type);
-	if (!max_size)
-		/* alloc_bootmem() doesn't like zero-sized allocations! */
-		return;
-
-	// set up OS data structures to hold error info
-	IA64_LOG_ALLOCATE(sal_info_type, max_size);
-	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
-	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
-}
+static int __init
+ia64_mca_disable_cpe_polling(char *str)
+{
+	cpe_poll_enabled = 0;
+	return 1;
+}
+
+__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
+
+static struct irqaction cmci_irqaction = {
+	.handler =	ia64_mca_cmc_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cmc_hndlr"
+};
+
+static struct irqaction cmcp_irqaction = {
+	.handler =	ia64_mca_cmc_int_caller,
+	.flags =	SA_INTERRUPT,
+	.name =		"cmc_poll"
+};
+
+static struct irqaction mca_rdzv_irqaction = {
+	.handler =	ia64_mca_rendez_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_rdzv"
+};
+
+static struct irqaction mca_wkup_irqaction = {
+	.handler =	ia64_mca_wakeup_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_wkup"
+};
+
+#ifdef CONFIG_ACPI
+static struct irqaction mca_cpe_irqaction = {
+	.handler =	ia64_mca_cpe_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_hndlr"
+};
+
+static struct irqaction mca_cpep_irqaction = {
+	.handler =	ia64_mca_cpe_int_caller,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_poll"
+};
+#endif /* CONFIG_ACPI */
-/*
- * ia64_log_get
- *
- *	Get the current MCA log from SAL and copy it into the OS log buffer.
- *
- * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- * Outputs  :   size        (total record length)
- *              *buffer     (ptr to error record)
- *
- */
-static u64
-ia64_log_get(int sal_info_type, u8 **buffer)
-{
-	sal_log_record_header_t     *log_buffer;
-	u64                         total_len = 0;
-	int                         s;
-
-	IA64_LOG_LOCK(sal_info_type);
-
-	/* Get the process state information */
-	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
-
-	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
-
-	if (total_len) {
-		IA64_LOG_INDEX_INC(sal_info_type);
-		IA64_LOG_UNLOCK(sal_info_type);
-		IA64_MCA_DEBUG("ia64_log_get: SAL error record type %d retrieved. "
-			       "Record length = %ld\n", sal_info_type, total_len);
-		*buffer = (u8 *) log_buffer;
-		return total_len;
-	} else {
-		IA64_LOG_UNLOCK(sal_info_type);
-		return 0;
-	}
-}
+/*
+ * ia64_mca_init
+ *
+ *	Do all the system level mca specific initialization.
+ *
+ *	1. Register spinloop and wakeup request interrupt vectors
+ *
+ *	2. Register OS_MCA handler entry point
+ *
+ *	3. Register OS_INIT handler entry point
+ *
+ *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
+ *
+ *	Note that this initialization is done very early before some kernel
+ *	services are available.
+ *
+ *	Inputs  :   None
+ *
+ *	Outputs :   None
+ */
-/*
- * ia64_log_prt_oem_data
- *
- *	Print OEM specific data if included.
- *
- * Inputs   :   header_len  (length passed in section header)
- *              sect_len    (default length of section type)
- *              p_data      (ptr to data)
- *              prfunc      (print function)
- * Outputs  :   None
- *
- */
-void
-ia64_log_prt_oem_data (int header_len, int sect_len, u8 *p_data, prfunc_t prfunc)
-{
-	int oem_data_len, i;
-
-	if ((oem_data_len = header_len - sect_len) > 0) {
-		prfunc(" OEM Specific Data:");
-		for (i = 0; i < oem_data_len; i++, p_data++)
-			prfunc(" %02x", *p_data);
-	}
-	prfunc("\n");
-}
-
-/*
- * ia64_log_rec_header_print
- *
- *	Log info from the SAL error record header.
- *
- * Inputs   :   lh *    (ptr to SAL log error record header)
- *              prfunc  (fn ptr of log output function to use)
- * Outputs  :   None
- */
-void
-ia64_log_rec_header_print (sal_log_record_header_t *lh, prfunc_t prfunc)
-{
-	prfunc("+Err Record ID: %ld    SAL Rev: %2x.%02x\n", lh->id,
-	       lh->revision.major, lh->revision.minor);
-	prfunc("+Time: %02x/%02x/%02x%02x %02x:%02x:%02x    Severity %d\n",
-	       lh->timestamp.slh_month, lh->timestamp.slh_day,
-	       lh->timestamp.slh_century, lh->timestamp.slh_year,
-	       lh->timestamp.slh_hour, lh->timestamp.slh_minute,
-	       lh->timestamp.slh_second, lh->severity);
-}
-/*
- * ia64_log_processor_regs_print
- *	Print the contents of the saved processor register(s) in the format
- *		<reg_prefix>[<index>] <value>
- *
- * Inputs   :   regs       (Register save buffer)
- *		reg_num    (# of registers)
- *		reg_class  (application/banked/control/bank1_general)
- *		reg_prefix (ar/br/cr/b1_gr)
- * Outputs  :	None
- *
- */
-void
-ia64_log_processor_regs_print(u64 *regs, int reg_num, char *reg_class,
-			      char *reg_prefix, prfunc_t prfunc)
-{
-	int i;
-
-	prfunc("+%s Registers\n", reg_class);
-	for (i = 0; i < reg_num; i++)
-		prfunc("+  %s[%d] 0x%lx\n", reg_prefix, i, regs[i]);
-}
-
-/*
- * ia64_log_processor_fp_regs_print
- *	Print the contents of the saved floating page register(s) in the format
- *		<reg_prefix>[<index>] <value>
- *
- * Inputs   :	ia64_fpreg (Register save buffer)
- *		reg_num    (# of registers)
- *		reg_class  (application/banked/control/bank1_general)
- *		reg_prefix (ar/br/cr/b1_gr)
- * Outputs  :	None
- *
- */
-void
-ia64_log_processor_fp_regs_print(struct ia64_fpreg *regs, int reg_num,
-				 char *reg_class, char *reg_prefix,
-				 prfunc_t prfunc)
-{
-	int i;
-
-	prfunc("+%s Registers\n", reg_class);
-	for (i = 0; i < reg_num; i++)
-		prfunc("+  %s[%d] 0x%lx%016lx\n", reg_prefix, i,
-		       regs[i].u.bits[1], regs[i].u.bits[0]);
-}
-
-static char *pal_mesi_state[] = {
-	"Invalid",
-	"Shared",
-	"Exclusive",
-	"Modified",
-	"Reserved1",
-	"Reserved2",
-	"Reserved3",
-	"Reserved4"
-};
-
-static char *pal_cache_op[] = {
-	"Unknown",
-	"Move in",
-	"Cast out",
-	"Coherency check",
-	"Internal",
-	"Instruction fetch",
-	"Implicit Writeback",
-	"Reserved"
-};
-
-/*
- * ia64_log_cache_check_info_print
- *	Display the machine check information related to cache error(s).
- * Inputs:  i           (Multiple errors are logged, i - index of logged error)
- *          cc_info *   (Ptr to cache check info logged by the PAL and later
- *			 captured by the SAL)
- *          prfunc      (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_cache_check_info_print (int i,
-				 sal_log_mod_error_info_t *cache_check_info,
-				 prfunc_t prfunc)
-{
-	pal_cache_check_info_t	*info;
-	u64			target_addr;
-
-	if (!cache_check_info->valid.check_info) {
-		IA64_MCA_DEBUG("ia64_mca_log_print: invalid cache_check_info[%d]\n", i);
-		return;                 /* If check info data not valid, skip it */
-	}
-
-	info = (pal_cache_check_info_t *)&cache_check_info->check_info;
-	target_addr = cache_check_info->target_identifier;
-
-	prfunc("+ Cache check info[%d]\n+", i);
-	prfunc("  Level: L%d,", info->level);
-	if (info->mv)
-		prfunc(" Mesi: %s,", pal_mesi_state[info->mesi]);
-	prfunc(" Index: %d,", info->index);
-	if (info->ic)
-		prfunc(" Cache: Instruction,");
-	if (info->dc)
-		prfunc(" Cache: Data,");
-	if (info->tl)
-		prfunc(" Line: Tag,");
-	if (info->dl)
-		prfunc(" Line: Data,");
-	prfunc(" Operation: %s,", pal_cache_op[info->op]);
-	if (info->wiv)
-		prfunc(" Way: %d,", info->way);
-	if (cache_check_info->valid.target_identifier)
-		/* Hope target address is saved in target_identifier */
-		if (info->tv)
-			prfunc(" Target Addr: 0x%lx,", target_addr);
-	if (info->mcc)
-		prfunc(" MC: Corrected");
-	prfunc("\n");
-}
-
-/*
- * ia64_log_tlb_check_info_print
- *	Display the machine check information related to tlb error(s).
- * Inputs:  i           (Multiple errors are logged, i - index of logged error)
- *          tlb_info *  (Ptr to machine check info logged by the PAL and later
- *			 captured by the SAL)
- *          prfunc      (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_tlb_check_info_print (int i,
-			       sal_log_mod_error_info_t *tlb_check_info,
-			       prfunc_t prfunc)
-{
-	pal_tlb_check_info_t	*info;
-
-	if (!tlb_check_info->valid.check_info) {
-		IA64_MCA_DEBUG("ia64_mca_log_print: invalid tlb_check_info[%d]\n", i);
-		return;                 /* If check info data not valid, skip it */
-	}
-
-	info = (pal_tlb_check_info_t *)&tlb_check_info->check_info;
-
-	prfunc("+ TLB Check Info [%d]\n+", i);
-	if (info->itc)
-		prfunc("  Failure: Instruction Translation Cache");
-	if (info->dtc)
-		prfunc("  Failure: Data Translation Cache");
-	if (info->itr) {
-		prfunc("  Failure: Instruction Translation Register");
-		prfunc(" ,Slot: %ld", info->tr_slot);
-	}
-	if (info->dtr) {
-		prfunc("  Failure: Data Translation Register");
-		prfunc(" ,Slot: %ld", info->tr_slot);
-	}
-	if (info->mcc)
-		prfunc(" ,MC: Corrected");
-	prfunc("\n");
-}
-
-/*
- * ia64_log_bus_check_info_print
- *	Display the machine check information related to bus error(s).
- * Inputs:  i           (Multiple errors are logged, i - index of logged error)
- *          bus_info *  (Ptr to machine check info logged by the PAL and later
- *			 captured by the SAL)
- *          prfunc      (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_bus_check_info_print (int i,
-			       sal_log_mod_error_info_t *bus_check_info,
-			       prfunc_t prfunc)
-{
-	pal_bus_check_info_t *info;
-	u64	req_addr;	/* Address of the requestor of the transaction */
-	u64	resp_addr;	/* Address of the responder of the transaction */
-	u64	targ_addr;	/* Address where the data was to be delivered to */
-				/* or obtained from */
-
-	if (!bus_check_info->valid.check_info) {
-		IA64_MCA_DEBUG("ia64_mca_log_print: invalid bus_check_info[%d]\n", i);
-		return;                 /* If check info data not valid, skip it */
-	}
-
-	info = (pal_bus_check_info_t *)&bus_check_info->check_info;
-	req_addr = bus_check_info->requestor_identifier;
-	resp_addr = bus_check_info->responder_identifier;
-	targ_addr = bus_check_info->target_identifier;
-
-	prfunc("+ BUS Check Info [%d]\n+", i);
-	prfunc(" Status Info: %d", info->bsi);
-	prfunc(" ,Severity: %d", info->sev);
-	prfunc(" ,Transaction Type: %d", info->type);
-	prfunc(" ,Transaction Size: %d", info->size);
-	if (info->cc)
-		prfunc(" ,Cache-cache-transfer");
-	if (info->ib)
-		prfunc(" ,Error: Internal");
-	if (info->eb)
-		prfunc(" ,Error: External");
-	if (info->mcc)
-		prfunc(" ,MC: Corrected");
-	if (info->tv)
-		prfunc(" ,Target Address: 0x%lx", targ_addr);
-	if (info->rq)
-		prfunc(" ,Requestor Address: 0x%lx", req_addr);
-	if (info->tv)
-		prfunc(" ,Responder Address: 0x%lx", resp_addr);
-	prfunc("\n");
-}
-
-/*
- * ia64_log_mem_dev_err_info_print
- *
- *	Format and log the platform memory device error record section data.
- *
- * Inputs:  mem_dev_err_info * (Ptr to memory device error record section
- *			        returned by SAL)
- *          prfunc             (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_mem_dev_err_info_print (sal_log_mem_dev_err_info_t *mdei,
-				 prfunc_t prfunc)
-{
-	prfunc("+ Mem Error Detail: ");
-
-	if (mdei->valid.error_status)
-		prfunc(" Error Status: %#lx,", mdei->error_status);
-	if (mdei->valid.physical_addr)
-		prfunc(" Physical Address: %#lx,", mdei->physical_addr);
-	if (mdei->valid.addr_mask)
-		prfunc(" Address Mask: %#lx,", mdei->addr_mask);
-	if (mdei->valid.node)
-		prfunc(" Node: %d,", mdei->node);
-	if (mdei->valid.card)
-		prfunc(" Card: %d,", mdei->card);
-	if (mdei->valid.module)
-		prfunc(" Module: %d,", mdei->module);
-	if (mdei->valid.bank)
-		prfunc(" Bank: %d,", mdei->bank);
-	if (mdei->valid.device)
-		prfunc(" Device: %d,", mdei->device);
-	if (mdei->valid.row)
-		prfunc(" Row: %d,", mdei->row);
-	if (mdei->valid.column)
-		prfunc(" Column: %d,", mdei->column);
-	if (mdei->valid.bit_position)
-		prfunc(" Bit Position: %d,", mdei->bit_position);
-	if (mdei->valid.target_id)
-		prfunc(" ,Target Address: %#lx,", mdei->target_id);
-	if (mdei->valid.requestor_id)
-		prfunc(" ,Requestor Address: %#lx,", mdei->requestor_id);
-	if (mdei->valid.responder_id)
-		prfunc(" ,Responder Address: %#lx,", mdei->responder_id);
-	if (mdei->valid.bus_spec_data)
-		prfunc(" Bus Specific Data: %#lx,", mdei->bus_spec_data);
-	prfunc("\n");
-
-	if (mdei->valid.oem_id) {
-		u8	*p_data = &(mdei->oem_id[0]);
-		int	i;
-
-		prfunc(" OEM Memory Controller ID:");
-		for (i = 0; i < 16; i++, p_data++)
-			prfunc(" %02x", *p_data);
-		prfunc("\n");
-	}
-
-	if (mdei->valid.oem_data) {
-		platform_mem_dev_err_print((int)mdei->header.len,
-					   (int)sizeof(sal_log_mem_dev_err_info_t) - 1,
-					   &(mdei->oem_data[0]), prfunc);
-	}
-}
-
-/*
- * ia64_log_sel_dev_err_info_print
- *
- *	Format and log the platform SEL device error record section data.
- *
- * Inputs:  sel_dev_err_info * (Ptr to the SEL device error record section
- *			        returned by SAL)
- *          prfunc             (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_sel_dev_err_info_print (sal_log_sel_dev_err_info_t *sdei,
-				 prfunc_t prfunc)
+void __init
+ia64_mca_init(void)
+{
+	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
+	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
+	int i;
+	s64 rc;
+	struct ia64_sal_retval isrv;
+	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
-{
-	int     i;
-
-	prfunc("+ SEL Device Error Detail: ");
-
-	if (sdei->valid.record_id)
-		prfunc(" Record ID: %#x", sdei->record_id);
-	if (sdei->valid.record_type)
-		prfunc(" Record Type: %#x", sdei->record_type);
-	prfunc(" Time Stamp: ");
-	for (i = 0; i < 4; i++)
-		prfunc("%1d", sdei->timestamp[i]);
-	if (sdei->valid.generator_id)
-		prfunc(" Generator ID: %#x", sdei->generator_id);
-	if (sdei->valid.evm_rev)
-		prfunc(" Message Format Version: %#x", sdei->evm_rev);
-	if (sdei->valid.sensor_type)
-		prfunc(" Sensor Type: %#x", sdei->sensor_type);
-	if (sdei->valid.sensor_num)
-		prfunc(" Sensor Number: %#x", sdei->sensor_num);
-	if (sdei->valid.event_dir)
-		prfunc(" Event Direction Type: %#x", sdei->event_dir);
-	if (sdei->valid.event_data1)
-		prfunc(" Data1: %#x", sdei->event_data1);
-	if (sdei->valid.event_data2)
-		prfunc(" Data2: %#x", sdei->event_data2);
-	if (sdei->valid.event_data3)
-		prfunc(" Data3: %#x", sdei->event_data3);
-	prfunc("\n");
-}
-
-/*
- * ia64_log_pci_bus_err_info_print
- *
- *	Format and log the platform PCI bus error record section data.
- *
- * Inputs:  pci_bus_err_info * (Ptr to the PCI bus error record section
- *			        returned by SAL)
- *          prfunc             (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_pci_bus_err_info_print (sal_log_pci_bus_err_info_t *pbei,
-				 prfunc_t prfunc)
-{
-	prfunc("+ PCI Bus Error Detail: ");
-
-	if (pbei->valid.err_status)
-		prfunc(" Error Status: %#lx", pbei->err_status);
-	if (pbei->valid.err_type)
-		prfunc(" Error Type: %#x", pbei->err_type);
-	if (pbei->valid.bus_id)
-		prfunc(" Bus ID: %#x", pbei->bus_id);
-	if (pbei->valid.bus_address)
-		prfunc(" Bus Address: %#lx", pbei->bus_address);
-	if (pbei->valid.bus_data)
-		prfunc(" Bus Data: %#lx", pbei->bus_data);
-	if (pbei->valid.bus_cmd)
-		prfunc(" Bus Command: %#lx", pbei->bus_cmd);
-	if (pbei->valid.requestor_id)
-		prfunc(" Requestor ID: %#lx", pbei->requestor_id);
-	if (pbei->valid.responder_id)
-		prfunc(" Responder ID: %#lx", pbei->responder_id);
-	if (pbei->valid.target_id)
-		prfunc(" Target ID: %#lx", pbei->target_id);
-	if (pbei->valid.oem_data)
-		prfunc("\n");
-
-	if (pbei->valid.oem_data) {
-		platform_pci_bus_err_print((int)pbei->header.len,
-					   (int)sizeof(sal_log_pci_bus_err_info_t) - 1,
-					   &(pbei->oem_data[0]), prfunc);
-	}
-}
+	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+
+	/* Clear the Rendez checkin flag for all cpus */
+	for (i = 0 ; i < NR_CPUS; i++)
+		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-/*
- * ia64_log_smbios_dev_err_info_print
- *
- *	Format and log the platform SMBIOS device error record section data.
- *
- * Inputs:  smbios_dev_err_info * (Ptr to the SMBIOS device error record
- *				   section returned by SAL)
- *          prfunc                (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_smbios_dev_err_info_print (sal_log_smbios_dev_err_info_t *sdei,
-				    prfunc_t prfunc)
-{
-	u8	i;
-
-	prfunc("+ SMBIOS Device Error Detail: ");
-
-	if (sdei->valid.event_type)
-		prfunc(" Event Type: %#x", sdei->event_type);
-	if (sdei->valid.time_stamp) {
-		prfunc(" Time Stamp: ");
-		for (i = 0; i < 6; i++)
-			prfunc("%d", sdei->time_stamp[i]);
-	}
-	if ((sdei->valid.data) && (sdei->valid.length)) {
-		prfunc(" Data: ");
-		for (i = 0; i < sdei->length; i++)
-			prfunc(" %02x", sdei->data[i]);
-	}
-	prfunc("\n");
-}
+	/*
+	 * Register the rendezvous spinloop and wakeup mechanism with SAL
+	 */
+
+	/* Register the rendezvous interrupt vector with SAL */
+	while (1) {
+		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
+					      SAL_MC_PARAM_MECHANISM_INT,
+					      IA64_MCA_RENDEZ_VECTOR,
+					      timeout,
+					      SAL_MC_PARAM_RZ_ALWAYS);
+		rc = isrv.status;
+		if (rc == 0)
+			break;
+		if (rc == -2) {
+			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
+				"%ld to %ld milliseconds\n", timeout, isrv.v0);
+			timeout = isrv.v0;
+			continue;
+		}
+		printk(KERN_ERR "Failed to register rendezvous interrupt "
+		       "with SAL (status %ld)\n", rc);
+		return;
+	}
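The while (1) loop added above is a small negotiate-and-retry protocol: SAL can answer -2, meaning "timeout too short", and return a workable value in isrv.v0, which the OS adopts before retrying. The shape of the pattern, as a standalone sketch with invented names:

	/* attempt() returns 0 on success, -2 after storing a counter-proposal
	 * in *inout, or another negative value on hard failure. */
	long negotiate(long (*attempt)(unsigned long *inout), unsigned long initial)
	{
		unsigned long val = initial;
		long rc;

		while ((rc = attempt(&val)) == -2)
			;	/* adopt the callee's proposal in val and retry */
		return rc;
	}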
-/*
- * ia64_log_pci_comp_err_info_print
- *
- *	Format and log the platform PCI component error record section data.
- *
- * Inputs:  pci_comp_err_info * (Ptr to the PCI component error record section
- *				 returned by SAL)
- *          prfunc              (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_pci_comp_err_info_print(sal_log_pci_comp_err_info_t *pcei,
-				 prfunc_t prfunc)
-{
-	u32	n_mem_regs, n_io_regs;
-	u64	i, n_pci_data;
-	u64	*p_reg_data;
-	u8	*p_oem_data;
-
-	prfunc("+ PCI Component Error Detail: ");
-
-	if (pcei->valid.err_status)
-		prfunc(" Error Status: %#lx\n", pcei->err_status);
-	if (pcei->valid.comp_info)
-		prfunc(" Component Info: Vendor Id = %#x, Device Id = %#x,"
-		       " Class Code = %#x, Seg/Bus/Dev/Func = %d/%d/%d/%d\n",
-		       pcei->comp_info.vendor_id, pcei->comp_info.device_id,
-		       pcei->comp_info.class_code, pcei->comp_info.seg_num,
-		       pcei->comp_info.bus_num, pcei->comp_info.dev_num,
-		       pcei->comp_info.func_num);
-
-	n_mem_regs = (pcei->valid.num_mem_regs) ? pcei->num_mem_regs : 0;
-	n_io_regs =  (pcei->valid.num_io_regs)  ? pcei->num_io_regs  : 0;
-	p_reg_data = &(pcei->reg_data_pairs[0]);
-	p_oem_data = (u8 *)p_reg_data +
-		(n_mem_regs + n_io_regs) * 2 * sizeof(u64);
-
-	n_pci_data = p_oem_data - (u8 *)pcei;
-	if (n_pci_data > pcei->header.len) {
-		prfunc(" Invalid PCI Component Error Record format: length = %ld, "
-		       " Size PCI Data = %d, Num Mem-Map/IO-Map Regs = %ld/%ld\n",
-		       pcei->header.len, n_pci_data, n_mem_regs, n_io_regs);
-		return;
-	}
-
-	if (n_mem_regs) {
-		prfunc(" Memory Mapped Registers\n Address \tValue\n");
-		for (i = 0; i < pcei->num_mem_regs; i++) {
-			prfunc(" %#lx %#lx\n", p_reg_data[0], p_reg_data[1]);
-			p_reg_data += 2;
-		}
-	}
-	if (n_io_regs) {
-		prfunc(" I/O Mapped Registers\n Address \tValue\n");
-		for (i = 0; i < pcei->num_io_regs; i++) {
-			prfunc(" %#lx %#lx\n", p_reg_data[0], p_reg_data[1]);
-			p_reg_data += 2;
-		}
-	}
-	if (pcei->valid.oem_data) {
-		platform_pci_comp_err_print((int)pcei->header.len, n_pci_data,
-					    p_oem_data, prfunc);
-		prfunc("\n");
-	}
-}
+	/* Register the wakeup interrupt vector with SAL */
+	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
+				      SAL_MC_PARAM_MECHANISM_INT,
+				      IA64_MCA_WAKEUP_VECTOR,
+				      0, 0);
+	rc = isrv.status;
+	if (rc) {
+		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
+		       "(status %ld)\n", rc);
+		return;
+	}
+
+	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n",
+		       __FUNCTION__);
-/*
- * ia64_log_plat_specific_err_info_print
- *
- *	Format and log the platform specifie error record section data.
- *
- * Inputs:  sel_dev_err_info * (Ptr to the platform specific error record
- *			        section returned by SAL)
- *          prfunc             (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_plat_specific_err_info_print (sal_log_plat_specific_err_info_t *psei,
-				       prfunc_t prfunc)
-{
-	prfunc("+ Platform Specific Error Detail: ");
-
-	if (psei->valid.err_status)
-		prfunc(" Error Status: %#lx", psei->err_status);
-	if (psei->valid.guid) {
-		prfunc(" GUID: ");
-		ia64_log_prt_guid(&psei->guid, prfunc);
-	}
-	if (psei->valid.oem_data) {
-		platform_plat_specific_err_print((int)psei->header.len,
-						 (char *)psei->oem_data - (char *)psei,
-						 &psei->oem_data[0], prfunc);
-	}
-	prfunc("\n");
-}
+	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
+	/*
+	 * XXX - disable SAL checksum by setting size to 0; should be
+	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
+	 */
+	ia64_mc_info.imi_mca_handler_size = 0;
-/*
- * ia64_log_host_ctlr_err_info_print
- *
- *	Format and log the platform host controller error record section data.
- *
- * Inputs:  host_ctlr_err_info * (Ptr to the host controller error record
- *				  section returned by SAL)
- *          prfunc               (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_host_ctlr_err_info_print (sal_log_host_ctlr_err_info_t *hcei,
-				   prfunc_t prfunc)
-{
-	prfunc("+ Host Controller Error Detail: ");
-
-	if (hcei->valid.err_status)
-		prfunc(" Error Status: %#lx", hcei->err_status);
-	if (hcei->valid.requestor_id)
-		prfunc(" Requestor ID: %#lx", hcei->requestor_id);
-	if (hcei->valid.responder_id)
-		prfunc(" Responder ID: %#lx", hcei->responder_id);
-	if (hcei->valid.target_id)
-		prfunc(" Target ID: %#lx", hcei->target_id);
-	if (hcei->valid.bus_spec_data)
-		prfunc(" Bus Specific Data: %#lx", hcei->bus_spec_data);
-	if (hcei->valid.oem_data) {
-		platform_host_ctlr_err_print((int)hcei->header.len,
-					     (int)sizeof(sal_log_host_ctlr_err_info_t) - 1,
-					     &(hcei->oem_data[0]), prfunc);
-	}
-	prfunc("\n");
-}
+	/* Register the os mca handler with SAL */
+	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
+				       ia64_mc_info.imi_mca_handler,
+				       ia64_tpa(mca_hldlr_ptr->gp),
+				       ia64_mc_info.imi_mca_handler_size,
+				       0, 0, 0))) {
+		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
+		       "(status %ld)\n", rc);
+		return;
+	}
-/*
- * ia64_log_plat_bus_err_info_print
- *
- *	Format and log the platform bus error record section data.
- *
- * Inputs:  plat_bus_err_info * (Ptr to the platform bus error record section
- *				 returned by SAL)
- *          prfunc              (fn ptr of print function to be used for output)
- * Outputs:	None
- */
-void
-ia64_log_plat_bus_err_info_print (sal_log_plat_bus_err_info_t *pbei,
-				  prfunc_t prfunc)
-{
-	prfunc("+ Platform Bus Error Detail: ");
-
-	if (pbei->valid.err_status)
-		prfunc(" Error Status: %#lx", pbei->err_status);
-	if (pbei->valid.requestor_id)
-		prfunc(" Requestor ID: %#lx", pbei->requestor_id);
-	if (pbei->valid.responder_id)
-		prfunc(" Responder ID: %#lx", pbei->responder_id);
-	if (pbei->valid.target_id)
-		prfunc(" Target ID: %#lx", pbei->target_id);
-	if (pbei->valid.bus_spec_data)
-		prfunc(" Bus Specific Data: %#lx", pbei->bus_spec_data);
-	if (pbei->valid.oem_data) {
-		platform_plat_bus_err_print((int)pbei->header.len,
-					    (int)sizeof(sal_log_plat_bus_err_info_t) - 1,
-					    &(pbei->oem_data[0]), prfunc);
-	}
-	prfunc("\n");
-}
+	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n",
+		       __FUNCTION__, ia64_mc_info.imi_mca_handler,
+		       ia64_tpa(mca_hldlr_ptr->gp));
-/*
- * ia64_log_proc_dev_err_info_print
- *
- *	Display the processor device error record.
- *
- * Inputs:  sal_log_processor_info_t * (Ptr to processor device error record
- *					section body).
- *          prfunc                     (fn ptr of print function to be used
- *					for output).
- * Outputs:	None
- */
-void
-ia64_log_proc_dev_err_info_print (sal_log_processor_info_t *slpi,
-				  prfunc_t prfunc)
-{
-#ifdef MCA_PRT_XTRA_DATA
-	size_t d_len = slpi->header.len - sizeof(sal_log_section_hdr_t);
-#endif
-	sal_processor_static_info_t *spsi;
-	int	i;
-	sal_log_mod_error_info_t *p_data;
-
-	prfunc("+Processor Device Error Info Section\n");
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-	{
-		char	*p_data = (char *)&slpi->valid;
-
-		prfunc("SAL_PROC_DEV_ERR SECTION DATA:  Data buffer = %p, "
-		       "Data size = %ld\n", (void *)p_data, d_len);
-		ia64_log_hexdump(p_data, d_len, prfunc);
-		prfunc("End of SAL_PROC_DEV_ERR SECTION DATA\n");
-	}
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-	if (slpi->valid.proc_error_map)
-		prfunc(" Processor Error Map: %#lx\n", slpi->proc_error_map);
-
-	if (slpi->valid.proc_state_param)
-		prfunc(" Processor State Param: %#lx\n", slpi->proc_state_parameter);
-
-	if (slpi->valid.proc_cr_lid)
-		prfunc(" Processor LID: %#lx\n", slpi->proc_cr_lid);
-
-	/*
-	 * Note: March 2001 SAL spec states that if the number of elements in any
-	 * of the MOD_ERROR_INFO_STRUCT arrays is zero, the entire array is
-	 * absent. Also, current implementations only allocate space for number of
-	 * elements used.  So we walk the data pointer from here on.
-	 */
-	p_data = &slpi->info[0];
-
-	/* Print the cache check information if any*/
-	for (i = 0 ; i < slpi->valid.num_cache_check; i++, p_data++)
-		ia64_log_cache_check_info_print(i, p_data, prfunc);
-
-	/* Print the tlb check information if any*/
-	for (i = 0 ; i < slpi->valid.num_tlb_check; i++, p_data++)
-		ia64_log_tlb_check_info_print(i, p_data, prfunc);
-
-	/* Print the bus check information if any*/
-	for (i = 0 ; i < slpi->valid.num_bus_check; i++, p_data++)
-		ia64_log_bus_check_info_print(i, p_data, prfunc);
-
-	/* Print the reg file check information if any*/
-	for (i = 0 ; i < slpi->valid.num_reg_file_check; i++, p_data++)
-		ia64_log_hexdump((u8 *)p_data, sizeof(sal_log_mod_error_info_t),
-				 prfunc);	/* Just hex dump for now */
+	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
+	/*
+	 * XXX - disable SAL checksum by setting size to 0, should be
+	 * size of the actual init handler in mca_asm.S.
+	 */
+	ia64_mc_info.imi_monarch_init_handler_size = 0;
+	ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
+	ia64_mc_info.imi_slave_init_handler_size = 0;
+
+	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+		       ia64_mc_info.imi_monarch_init_handler);
+
+	/* Register the os init handler with SAL */
+	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
+				       ia64_mc_info.imi_monarch_init_handler,
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
+				       ia64_mc_info.imi_monarch_init_handler_size,
+				       ia64_mc_info.imi_slave_init_handler,
+				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
+				       ia64_mc_info.imi_slave_init_handler_size))) {
+		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
+		       "(status %ld)\n", rc);
+		return;
+	}
+
+	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+
+	/*
+	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
+	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+	 */
+	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
+	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP & enable */
-	/* Print the ms check information if any*/
-	for (i = 0 ; i < slpi->valid.num_ms_check; i++, p_data++)
-		ia64_log_hexdump((u8 *)p_data, sizeof(sal_log_mod_error_info_t),
-				 prfunc);	/* Just hex dump for now */
-
-	/* Print CPUID registers if any*/
-	if (slpi->valid.cpuid_info) {
-		u64	*p = (u64 *)p_data;
-
-		prfunc(" CPUID Regs: %#lx %#lx %#lx %#lx\n", p[0], p[1], p[2], p[3]);
-		p_data++;
-	}
-
-	/* Print processor static info if any */
-	if (slpi->valid.psi_static_struct) {
-		spsi = (sal_processor_static_info_t *)p_data;
-
-		/* Print branch register contents if valid */
-		if (spsi->valid.br)
-			ia64_log_processor_regs_print(spsi->br, 8, "Branch", "br", prfunc);
-
-		/* Print control register contents if valid */
-		if (spsi->valid.cr)
-			ia64_log_processor_regs_print(spsi->cr, 128, "Control", "cr", prfunc);
-
-		/* Print application register contents if valid */
-		if (spsi->valid.ar)
-			ia64_log_processor_regs_print(spsi->ar, 128, "Application", "ar", prfunc);
-
-		/* Print region register contents if valid */
-		if (spsi->valid.rr)
-			ia64_log_processor_regs_print(spsi->rr, 8, "Region", "rr", prfunc);
-
-		/* Print floating-point register contents if valid */
-		if (spsi->valid.fr)
-			ia64_log_processor_fp_regs_print(spsi->fr, 128, "Floating-point", "fr",
-							 prfunc);
-	}
+	/* Setup the MCA rendezvous interrupt vector */
+	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+
+	/* Setup the MCA wakeup interrupt vector */
+	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+
+#ifdef CONFIG_ACPI
+	/* Setup the CPE interrupt vector */
+	{
+		irq_desc_t *desc;
+		unsigned int irq;
+		int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+
+		if (cpev >= 0) {
+			for (irq = 0; irq < NR_IRQS; ++irq)
+				if (irq_to_vector(irq) == cpev) {
+					desc = irq_descp(irq);
+					desc->status |= IRQ_PER_CPU;
+					desc->handler = &irq_type_iosapic_level;
+					setup_irq(irq, &mca_cpe_irqaction);
+				}
-/*
- * ia64_log_processor_info_print
- *
- *	Display the processor-specific information logged by PAL as a part
- *	of MCA or INIT or CMC.
- *
- * Inputs   :  lh	(Pointer of the sal log header which specifies the
- *			 format of SAL state info as specified by the SAL spec).
- *             prfunc	(fn ptr of print function to be used for output).
- * Outputs  :  None
- */
-void
-ia64_log_processor_info_print(sal_log_record_header_t *lh, prfunc_t prfunc)
-{
-	sal_log_section_hdr_t	*slsh;
-	int			n_sects;
-	u32			ercd_pos;
-
-	if (!lh)
-		return;
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-	ia64_log_prt_record_header(lh, prfunc);
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-	if ((ercd_pos = sizeof(sal_log_record_header_t)) >= lh->len) {
-		IA64_MCA_DEBUG("ia64_mca_log_print: "
-			       "truncated SAL CMC error record. len = %d\n",
-			       lh->len);
-		return;
-	}
-
-	/* Print record header info */
-	ia64_log_rec_header_print(lh, prfunc);
-
-	for (n_sects = 0; (ercd_pos < lh->len); n_sects++, ercd_pos += slsh->len) {
-		/* point to next section header */
-		slsh = (sal_log_section_hdr_t *)((char *)lh + ercd_pos);
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-		ia64_log_prt_section_header(slsh, prfunc);
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-		if (verify_guid(&slsh->guid, &(SAL_PROC_DEV_ERR_SECT_GUID))) {
-			IA64_MCA_DEBUG("ia64_mca_log_print: unsupported record section\n");
-			continue;
-		}
-
-		/*
-		 * Now process processor device error record section
-		 */
-		ia64_log_proc_dev_err_info_print((sal_log_processor_info_t *)slsh, printk);
-	}
-
-	IA64_MCA_DEBUG("ia64_mca_log_print: "
-		       "found %d sections in SAL CMC error record. len = %d\n",
-		       n_sects, lh->len);
-
-	if (!n_sects) {
-		prfunc("No Processor Device Error Info Section found\n");
-		return;
-	}
-}
+			ia64_mca_register_cpev(cpev);
+		}
+	}
+#endif
+
+	/* Initialize the areas set aside by the OS to buffer the
+	 * platform/processor error states for MCA/INIT/CMC
+	 * handling.
+	 */
+	ia64_log_init(SAL_INFO_TYPE_MCA);
+	ia64_log_init(SAL_INFO_TYPE_INIT);
+	ia64_log_init(SAL_INFO_TYPE_CMC);
+	ia64_log_init(SAL_INFO_TYPE_CPE);
+
+	printk(KERN_INFO "MCA related initialization done\n");
+}
-/*
- * ia64_log_platform_info_print
- *
- *	Format and Log the SAL Platform Error Record.
- *
- * Inputs   :  lh	(Pointer to the sal error record header with format
- *			 specified by the SAL spec).
- *             prfunc	(fn ptr of log output function to use)
- * Outputs  :  platform error status
- */
-int
-ia64_log_platform_info_print (sal_log_record_header_t *lh, prfunc_t prfunc)
-{
-	sal_log_section_hdr_t	*slsh;
-	int			n_sects;
-	u32			ercd_pos;
-	int			platform_err = 0;
-
-	if (!lh)
-		return platform_err;
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-	ia64_log_prt_record_header(lh, prfunc);
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-	if ((ercd_pos = sizeof(sal_log_record_header_t)) >= lh->len) {
-		IA64_MCA_DEBUG("ia64_mca_log_print: "
-			       "truncated SAL error record. len = %d\n",
-			       lh->len);
-		return platform_err;
-	}
-
-	/* Print record header info */
-	ia64_log_rec_header_print(lh, prfunc);
-
-	for (n_sects = 0; (ercd_pos < lh->len); n_sects++, ercd_pos += slsh->len) {
-		/* point to next section header */
-		slsh = (sal_log_section_hdr_t *)((char *)lh + ercd_pos);
-
-#ifdef MCA_PRT_XTRA_DATA	// for test only @FVL
-		ia64_log_prt_section_header(slsh, prfunc);
-
-		if (efi_guidcmp(slsh->guid, SAL_PROC_DEV_ERR_SECT_GUID) != 0) {
-			size_t d_len = slsh->len - sizeof(sal_log_section_hdr_t);
-			char *p_data = (char *)&((sal_log_mem_dev_err_info_t *)slsh)->valid;
-
-			prfunc("Start of Platform Err Data Section:  Data buffer = %p, "
-			       "Data size = %ld\n", (void *)p_data, d_len);
-			ia64_log_hexdump(p_data, d_len, prfunc);
-			prfunc("End of Platform Err Data Section\n");
-		}
-#endif	// MCA_PRT_XTRA_DATA for test only @FVL
-
-		/*
-		 * Now process CPE error record section
-		 */
-		if (efi_guidcmp(slsh->guid, SAL_PROC_DEV_ERR_SECT_GUID) == 0) {
-			ia64_log_proc_dev_err_info_print((sal_log_processor_info_t *)slsh,
-							 prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform Memory Device Error Info Section\n");
-			ia64_log_mem_dev_err_info_print((sal_log_mem_dev_err_info_t *)slsh,
-							prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform SEL Device Error Info Section\n");
-			ia64_log_sel_dev_err_info_print((sal_log_sel_dev_err_info_t *)slsh,
-							prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform PCI Bus Error Info Section\n");
-			ia64_log_pci_bus_err_info_print((sal_log_pci_bus_err_info_t *)slsh,
-							prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform SMBIOS Device Error Info Section\n");
-			ia64_log_smbios_dev_err_info_print((sal_log_smbios_dev_err_info_t *)slsh,
-							   prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform PCI Component Error Info Section\n");
-			ia64_log_pci_comp_err_info_print((sal_log_pci_comp_err_info_t *)slsh,
-							 prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform Specific Error Info Section\n");
-			ia64_log_plat_specific_err_info_print((sal_log_plat_specific_err_info_t *)slsh,
-							      prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform Host Controller Error Info Section\n");
-			ia64_log_host_ctlr_err_info_print((sal_log_host_ctlr_err_info_t *)slsh,
-							  prfunc);
-		} else if (efi_guidcmp(slsh->guid, SAL_PLAT_BUS_ERR_SECT_GUID) == 0) {
-			platform_err = 1;
-			prfunc("+Platform Bus Error Info Section\n");
-			ia64_log_plat_bus_err_info_print((sal_log_plat_bus_err_info_t *)slsh,
-							 prfunc);
-		} else {
-			IA64_MCA_DEBUG("ia64_mca_log_print: unsupported record section\n");
-			continue;
-		}
-	}
-
-	IA64_MCA_DEBUG("ia64_mca_log_print: found %d sections in SAL error record. len = %d\n",
-		       n_sects, lh->len);
-
-	if (!n_sects) {
-		prfunc("No Platform Error Info Sections found\n");
-		return platform_err;
-	}
-	return platform_err;
-}
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs  : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
-/*
- * ia64_log_print
- *
- *	Displays the contents of the OS error log information
- *
- * Inputs   :  info_type    (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
- *             prfunc       (fn ptr of log output function to use)
- * Outputs  :  platform error status
- */
-int
-ia64_log_print(int sal_info_type, prfunc_t prfunc)
-{
-	int platform_err = 0;
-
-	switch(sal_info_type) {
-	case SAL_INFO_TYPE_MCA:
-		prfunc("+CPU %d: SAL log contains MCA error record\n", smp_processor_id());
-		ia64_log_rec_header_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
-		break;
-	case SAL_INFO_TYPE_INIT:
-		prfunc("+CPU %d: SAL log contains INIT error record\n", smp_processor_id());
-		ia64_log_rec_header_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
-		break;
-	case SAL_INFO_TYPE_CMC:
-		prfunc("+BEGIN HARDWARE ERROR STATE AT CMC\n");
-		ia64_log_processor_info_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
-		prfunc("+END HARDWARE ERROR STATE AT CMC\n");
-		break;
-	case SAL_INFO_TYPE_CPE:
-		prfunc("+BEGIN HARDWARE ERROR STATE AT CPE\n");
-		ia64_log_platform_info_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
-		prfunc("+END HARDWARE ERROR STATE AT CPE\n");
-		break;
-	default:
-		prfunc("+MCA UNKNOWN ERROR LOG (UNIMPLEMENTED)\n");
-		break;
-	}
-	return platform_err;
-}
-
-static int __init
-ia64_mca_disable_cpe_polling(char *str)
-{
-	cpe_poll_enabled = 0;
-	return 1;
-}
-
-__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
+	init_timer(&cmc_poll_timer);
+	cmc_poll_timer.function = ia64_mca_cmc_poll;
+
+	/* Reset to the correct state */
+	cmc_polling_enabled = 0;
+
+	init_timer(&cpe_poll_timer);
+	cpe_poll_timer.function = ia64_mca_cpe_poll;
+
+#ifdef CONFIG_ACPI
+	/* If platform doesn't support CPEI, get the timer going. */
+	if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0 && cpe_poll_enabled) {
+		register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+		ia64_mca_cpe_poll(0UL);
+	}
+#endif
+
+	return 0;
+}
+
+device_initcall(ia64_mca_late_init);
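Taken together, the surviving pieces form a small pipeline: ia64_log_get pulls a record out of SAL into the "next" buffer and flips the index, and the handlers pass the result on to salinfo for user space. A hypothetical caller, sketched against the signatures in this commit (the reworked salinfo_log_wakeup below takes an explicit irqsafe flag):

	static void fetch_and_report(int sal_info_type)
	{
		u8 *rec;
		u64 len = ia64_log_get(sal_info_type, &rec);

		if (len)
			salinfo_log_wakeup(sal_info_type, rec, len, 1);	/* 1 == irqsafe */
	}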
arch/ia64/kernel/salinfo.c
View file @
3dbdb149
...
...
@@ -16,6 +16,9 @@
* Cache the record across multi-block reads from user space.
* Support > 64 cpus.
* Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module.
*
* Jan 28 2004 kaos@sgi.com
* Periodically check for outstanding MCA or INIT records.
*/
#include <linux/types.h>
...
...
@@ -23,6 +26,7 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/semaphore.h>
...
...
@@ -179,6 +183,8 @@ shift1_data_saved (struct salinfo_data *data, int shift)
/* This routine is invoked in interrupt context. Note: mca.c enables
* interrupts before calling this code for CMC/CPE. MCA and INIT events are
* not irq safe, do not call any routines that use spinlocks, they may deadlock.
* MCA and INIT records are recorded, a timer event will look for any
* outstanding events and wake up the user space code.
*
* The buffer passed from mca.c points to the output from ia64_log_get. This is
* a persistent buffer but its contents can change between the interrupt and
...
...
@@ -186,12 +192,12 @@ shift1_data_saved (struct salinfo_data *data, int shift)
  * changes.
 */
 void
-salinfo_log_wakeup(int type, u8 *buffer, u64 size)
+salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 {
	struct salinfo_data *data = salinfo_data + type;
	struct salinfo_data_saved *data_saved;
	unsigned long flags = 0;
-	int i, irqsafe = type != SAL_INFO_TYPE_MCA && type != SAL_INFO_TYPE_INIT;
+	int i;
	int saved_size = ARRAY_SIZE(data->data_saved);

	BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
...
...
@@ -224,6 +230,35 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size)
 	}
 }

+/* Check for outstanding MCA/INIT records every 5 minutes (arbitrary) */
+#define SALINFO_TIMER_DELAY (5*60*HZ)
+static struct timer_list salinfo_timer;
+
+static void
+salinfo_timeout_check(struct salinfo_data *data)
+{
+	int i;
+	if (!data->open)
+		return;
+	for (i = 0; i < NR_CPUS; ++i) {
+		if (test_bit(i, &data->cpu_event)) {
+			/* double up() is not a problem, user space will see no
+			 * records for the additional "events".
+			 */
+			up(&data->sem);
+		}
+	}
+}
+
+static void
+salinfo_timeout (unsigned long arg)
+{
+	salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
+	salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
+	salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
+	add_timer(&salinfo_timer);
+}
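The timer exists because MCA and INIT records are logged in contexts where the semaphore cannot be upped; every five minutes it re-checks cpu_event and wakes any blocked reader. From user space that wakeup is just a read completing. A sketch of the consumer side (the /proc path is an assumption based on salinfo's /proc/sal layout):

	#include <fcntl.h>
	#include <unistd.h>

	int wait_for_mca_event(void)
	{
		char buf[16];
		ssize_t n;
		int fd = open("/proc/sal/mca/event", O_RDONLY);	/* assumed path */

		if (fd < 0)
			return -1;
		n = read(fd, buf, sizeof(buf));	/* blocks until an event is pending */
		close(fd);
		return n > 0 ? 0 : -1;
	}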
 static int
 salinfo_event_open(struct inode *inode, struct file *file)
 {
...
@@ -563,6 +598,11 @@ salinfo_init(void)
 		*sdir++ = salinfo_dir;

+	init_timer(&salinfo_timer);
+	salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
+	salinfo_timer.function = &salinfo_timeout;
+	add_timer(&salinfo_timer);
+
	return 0;
 }
...
...
arch/ia64/kernel/smpboot.c
View file @
3dbdb149
...
...
@@ -77,7 +77,6 @@ extern void __init calibrate_delay (void);
 extern void start_ap (void);
 extern unsigned long ia64_iobase;

-int cpucount;
 task_t *task_for_booting_cpu;

 /* Bitmask of currently online CPUs */
...
...
arch/ia64/kernel/traps.c
View file @
3dbdb149
...
...
@@ -46,21 +46,14 @@ register double f30 asm ("f30"); register double f31 asm ("f31");
 extern spinlock_t timerlist_lock;

-static fpswa_interface_t *fpswa_interface;
+fpswa_interface_t *fpswa_interface;

 void __init
 trap_init (void)
 {
-	int major = 0, minor = 0;
-
-	if (ia64_boot_param->fpswa) {
+	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a kernel virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
-		major = fpswa_interface->revision >> 16;
-		minor = fpswa_interface->revision & 0xffff;
-	}
-	printk(KERN_INFO "fpswa interface at %lx (rev %d.%d)\n",
-	       ia64_boot_param->fpswa, major, minor);
 }
/*
...
...
arch/ia64/kernel/unaligned.c
View file @
3dbdb149
...
...
@@ -740,6 +740,7 @@ static int
 emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 {
	unsigned int len = 1 << ld.x6_sz;
+	unsigned long val = 0;

	/*
	 * r0, as target, doesn't need to be checked because Illegal Instruction
...
...
@@ -750,11 +751,9 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	 */

	/*
-	 * ldX.a we don't try to emulate anything but we must invalidate the ALAT entry.
+	 * ldX.a we will emulate load and also invalidate the ALAT entry.
	 * See comment below for explanation on how we handle ldX.a
	 */
-	if (ld.x6_op != 0x2) {
-		unsigned long val = 0;
-
	if (len != 2 && len != 4 && len != 8) {
		DPRINT("unknown size: x6=%d\n", ld.x6_sz);
...
...
@@ -764,7 +763,6 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	if (copy_from_user(&val, (void *) ifa, len))
		return -1;
	setreg(ld.r1, val, 0, regs);
-	}

	/*
	 * check for updates on any kind of loads
...
...
@@ -817,7 +815,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
  *	store & shift to temporary;
 *	r1=temporary
 *
- * So int this case, you would get the right value is r1 but the wrong info in
+ * So in this case, you would get the right value is r1 but the wrong info in
 * the ALAT. Notice that you could do it in reverse to finish with address 3
 * but you would still get the size wrong. To get the size right, one needs to
 * execute exactly the same kind of load. You could do it from a aligned
...
...
@@ -826,9 +824,12 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
  * So no matter what, it is not possible to emulate an advanced load
 * correctly. But is that really critical ?
 *
+ * We will always convert ld.a into a normal load with ALAT invalidated. This
+ * will enable compiler to do optimization where certain code path after ld.a
+ * is not required to have ld.c/chk.a, e.g., code path with no intervening stores.
+ *
- * Now one has to look at how ld.a is used, one must either do a ld.c.* or
- * chck.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
+ * If there is a store after the advanced load, one must either do a ld.c.* or
+ * chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
 * entry found in ALAT), and that's perfectly ok because:
 *
 * - ld.c.*, if the entry is not present a normal load is executed
...
...
@@ -836,19 +837,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
  *
 * In either case, the load can be potentially retried in another form.
 *
- * So it's okay NOT to do any actual load on an unaligned ld.a. However the ALAT
- * must be invalidated for the register (so that's chck.a.*,ld.c.* don't pick up
- * a stale entry later) The register base update MUST also be performed.
- *
- * Now what is the content of the register and its NaT bit in the case we don't
- * do the load ?  EAS2.4, says (in case an actual load is needed)
- *
- * - r1 = [r3], Nat = 0 if succeeds
- * - r1 = 0 Nat = 0 if trying to access non-speculative memory
- *
- * For us, there is nothing to do, because both ld.c.* and chk.a.* are going to
- * retry and thus eventually reload the register thereby changing Nat and
- * register content.
+ * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
+ * up a stale entry later). The register base update MUST also be performed.
 */
/*
...
...
arch/ia64/lib/io.c
View file @
3dbdb149
...
...
@@ -9,13 +9,13 @@
  * This needs to be optimized.
 */
 void
-__ia64_memcpy_fromio (void *to, unsigned long from, long count)
+__ia64_memcpy_fromio(void *to, unsigned long from, long count)
 {
+	char *dst = to;
+
	while (count) {
		count--;
-		*(char *) to = readb(from);
-		((char *) to)++;
-		from++;
+		*dst++ = readb(from++);
	}
 }
 EXPORT_SYMBOL(__ia64_memcpy_fromio);
...
...
@@ -25,13 +25,13 @@ EXPORT_SYMBOL(__ia64_memcpy_fromio);
  * This needs to be optimized.
 */
 void
-__ia64_memcpy_toio (unsigned long to, void *from, long count)
+__ia64_memcpy_toio(unsigned long to, void *from, long count)
 {
+	char *src = from;
+
	while (count) {
		count--;
-		writeb(*(char *) from, to);
-		((char *) from)++;
-		to++;
+		writeb(*src++, to++);
	}
 }
 EXPORT_SYMBOL(__ia64_memcpy_toio);
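Both loops previously advanced their cursors with ((char *) p)++, which treats a cast as an lvalue; that is not standard C and relied on a GCC extension that was being phased out, hence the properly typed local pointers. Behavior is unchanged; a trivial usage sketch (the MMIO base address is hypothetical):

	void read_device_block(void *buf, unsigned long mmio_base, long n)
	{
		__ia64_memcpy_fromio(buf, mmio_base, n);	/* byte-at-a-time readb() copy */
	}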
...
...
arch/ia64/sn/kernel/mca.c
View file @
3dbdb149
...
...
@@ -68,20 +68,6 @@ print_hook(const char *fmt, ...)
 }

-/*
- * ia64_sn2_platform_plat_specific_err_print
- *
- * Called by the MCA handler to log platform-specific errors.
- */
-void
-ia64_sn2_platform_plat_specific_err_print(int header_len, int sect_len,
-					  u8 *p_data, prfunc_t prfunc)
-{
-	ia64_sn_plat_specific_err_print(print_hook, p_data - sect_len);
-}
-
 static void
 sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
 {
...
...
arch/ia64/sn/kernel/sn2/sn2_smp.c
View file @
3dbdb149
...
...
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/init.h>
...
...
@@ -27,6 +27,8 @@
 #include <asm/delay.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
+#include <asm/bitops.h>
 #include <asm/hw_irq.h>
 #include <asm/current.h>
 #include <asm/sn/sn_cpuid.h>
...
...
@@ -67,14 +69,56 @@ wait_piowc(void)
  *
 * Purges the translation caches of all processors of the given virtual address
 * range.
+ *
+ * Note:
+ *	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ *	- cpu_vm_mask is converted into a nodemask of the nodes containing the
+ *	  cpus in cpu_vm_mask.
+ *	- if only one bit is set in cpu_vm_mask & it is the current cpu,
+ *	  then only the local TLB needs to be flushed. This flushing can be done
+ *	  using ptc.l. This is the common case & avoids the global spinlock.
+ *	- if multiple cpus have loaded the context, then flushing has to be
+ *	  done with ptc.g/MMRs under protection of the global ptc_lock.
 */

 void
 sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
 {
-	int cnode, mycnode, nasid, flushed = 0;
+	int i, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
	volatile unsigned long *ptc0, *ptc1;
	unsigned long flags = 0, data0, data1;
+	struct mm_struct *mm = current->active_mm;
+	short nasids[NR_NODES], nix;
+	DECLARE_BITMAP(nodes_flushed, NR_NODES);
+
+	CLEAR_BITMAP(nodes_flushed, NR_NODES);
+
+	i = 0;
+	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+		cnode = cpu_to_node(cpu);
+		__set_bit(cnode, nodes_flushed);
+		lcpu = cpu;
+		i++;
+	}
+
+	preempt_disable();
+
+	if (likely(i == 1 && lcpu == smp_processor_id())) {
+		do {
+			ia64_ptcl(start, nbits<<2);
+			start += (1UL << nbits);
+		} while (start < end);
+		ia64_srlz_i();
+		preempt_enable();
+		return;
+	}
+
+	nix = 0;
+	for (cnode = find_first_bit(&nodes_flushed, NR_NODES); cnode < NR_NODES;
+	     cnode = find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
+		nasids[nix++] = cnodeid_to_nasid(cnode);
+
	data0 = (1UL << SH_PTC_0_A_SHFT) | (nbits << SH_PTC_0_PS_SHFT) |
...
...
@@ -84,20 +128,19 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
 	ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
	ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);

-	mycnode = numa_node_id();
+	mynasid = smp_physical_node_id();

	spin_lock_irqsave(&sn2_global_ptc_lock, flags);

	do {
		data1 = start | (1UL << SH_PTC_1_START_SHFT);
-		for (cnode = 0; cnode < numnodes; cnode++) {
-			if (is_headless_node(cnode))
-				continue;
-			if (cnode == mycnode) {
+		for (i = 0; i < nix; i++) {
+			nasid = nasids[i];
+			if (likely(nasid == mynasid)) {
				ia64_ptcga(start, nbits<<2);
				ia64_srlz_i();
			} else {
-				nasid = cnodeid_to_nasid(cnode);
				ptc0 = CHANGE_NASID(nasid, ptc0);
				ptc1 = CHANGE_NASID(nasid, ptc1);
				pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
...
...
@@ -115,6 +158,7 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
 	spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);

+	preempt_enable();
 }
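Condensed, the rewritten function makes one decision: if the current cpu is the only one in cpu_vm_mask, purge locally with ptc.l and skip the global lock entirely; otherwise walk the precomputed nasid list under ptc_lock. A sketch of that structure (the two helpers are invented stand-ins for the inline code above):

	void tlb_purge_sketch(struct mm_struct *mm, unsigned long start,
			      unsigned long end, unsigned long nbits)
	{
		if (mm_loaded_only_here(mm)) {		/* hypothetical: one bit set, ours */
			do {
				ia64_ptcl(start, nbits << 2);	/* cheap local purge */
				start += 1UL << nbits;
			} while (start < end);
			ia64_srlz_i();
		} else {
			global_mmr_purge(mm, start, end, nbits);	/* hypothetical: ptc.ga/MMRs under ptc_lock */
		}
	}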
/*
...
...
include/asm-ia64/fpswa.h
View file @
3dbdb149
...
...
@@ -68,4 +68,6 @@ typedef struct {
	efi_fpswa_t	fpswa;
 } fpswa_interface_t;

+extern fpswa_interface_t *fpswa_interface;
+
 #endif /* _ASM_IA64_FPSWA_H */
include/asm-ia64/mca.h
View file @
3dbdb149
...
...
@@ -2,7 +2,7 @@
  * File:	mca.h
 * Purpose:	Machine check handling specific defines
 *
- * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
 */
...
...
@@ -20,23 +20,8 @@
 #include <asm/processor.h>
 #include <asm/mca_asm.h>

-/* These are the return codes from all the IA64_MCA specific interfaces */
-typedef	int ia64_mca_return_code_t;
-
-enum {
-	IA64_MCA_SUCCESS	=	0,
-	IA64_MCA_FAILURE	=	1
-};
-
 #define IA64_MCA_RENDEZ_TIMEOUT		(20 * 1000)	/* value in milliseconds - 20 seconds */
-
-#define IA64_CMC_INT_DISABLE		0
-#define IA64_CMC_INT_ENABLE		1
-
-typedef u32 int_vector_t;
-typedef u64 millisec_t;

 typedef union cmcv_reg_u {
	u64	cmcv_regval;
	struct	{
...
@@ -53,10 +38,6 @@ typedef union cmcv_reg_u {
 #define cmcv_mask		cmcv_reg_s.cmcr_mask
 #define cmcv_vector		cmcv_reg_s.cmcr_vector

-#define IA64_MCA_UCMC_HANDLER_SIZE	0x10
-#define IA64_INIT_HANDLER_SIZE		0x10
-
 enum {
	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1
...
...
@@ -85,16 +66,6 @@ typedef struct ia64_mc_info_s {
 } ia64_mc_info_t;

-/* Possible rendez states passed from SAL to OS during MCA
- * handoff
- */
-enum {
-	IA64_MCA_RENDEZ_NOT_RQD			=	0x0,
-	IA64_MCA_RENDEZ_DONE_WITHOUT_INIT	=	0x1,
-	IA64_MCA_RENDEZ_DONE_WITH_INIT		=	0x2,
-	IA64_MCA_RENDEZ_FAILURE			=	-1
-};
-
 typedef struct ia64_mca_sal_to_os_state_s {
	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
...
@@ -136,41 +107,14 @@ typedef struct ia64_mca_os_to_sal_state_s {
 	 */
 } ia64_mca_os_to_sal_state_t;

-typedef int (*prfunc_t)(const char * fmt, ...);
-
 extern void ia64_mca_init(void);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
 extern void ia64_mca_ucmc_handler(void);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
-extern irqreturn_t ia64_mca_rendez_int_handler(int, void *, struct pt_regs *);
-extern irqreturn_t ia64_mca_wakeup_int_handler(int, void *, struct pt_regs *);
-extern irqreturn_t ia64_mca_cmc_int_handler(int, void *, struct pt_regs *);
-extern irqreturn_t ia64_mca_cmc_int_caller(int, void *, struct pt_regs *);
-extern irqreturn_t ia64_mca_cpe_int_handler(int, void *, struct pt_regs *);
-extern irqreturn_t ia64_mca_cpe_int_caller(int, void *, struct pt_regs *);
-extern int  ia64_log_print(int, prfunc_t);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int  ia64_mca_check_errors(void);
-
-#define PLATFORM_CALL(fn, args)		printk("Platform call TBD\n")
-
-#define platform_mem_dev_err_print	ia64_log_prt_oem_data
-#define platform_pci_bus_err_print	ia64_log_prt_oem_data
-#define platform_pci_comp_err_print	ia64_log_prt_oem_data
-#define platform_plat_specific_err_print ia64_log_prt_oem_data
-#define platform_host_ctlr_err_print	ia64_log_prt_oem_data
-#define platform_plat_bus_err_print	ia64_log_prt_oem_data
-
-#undef	MCA_TEST
-
-#undef IA64_MCA_DEBUG_INFO
-
-#if defined(IA64_MCA_DEBUG_INFO)
-# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
-#else
-# define IA64_MCA_DEBUG(fmt...)
-#endif

 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
include/asm-ia64/mmu_context.h
View file @
3dbdb149
...
...
@@ -106,6 +106,7 @@ get_mmu_context (struct mm_struct *mm)
 		/* re-check, now that we've got the lock: */
		context = mm->context;
		if (context == 0) {
+			cpus_clear(mm->cpu_vm_mask);
			if (ia64_ctx.next >= ia64_ctx.limit)
				wrap_mmu_context(mm);
			mm->context = context = ia64_ctx.next++;
...
...
@@ -170,6 +171,8 @@ activate_context (struct mm_struct *mm)
 	do {
		context = get_mmu_context(mm);
		MMU_TRACE('A', smp_processor_id(), mm, context);
+		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		MMU_TRACE('a', smp_processor_id(), mm, context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
...
...
include/asm-ia64/percpu.h
View file @
3dbdb149
...
...
@@ -50,7 +50,7 @@ extern void *per_cpu_init(void);
 #else /* ! SMP */

-#define per_cpu(var, cpu)			((void)cpu, per_cpu__##var)
+#define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define per_cpu_init()				(__phys_per_cpu_start)
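The UP change is about lvalue-ness: the result of a comma expression is not an lvalue in ISO C (older GCC allowed it as a since-deprecated extension), so the old expansion could not be assigned through. Taking the address and dereferencing restores an assignable expression while still evaluating cpu. A tiny self-contained demonstration:

	int per_cpu__counter;

	#define per_cpu_old(var, cpu)	((void)(cpu), per_cpu__##var)
	#define per_cpu_new(var, cpu)	(*((void)(cpu), &per_cpu__##var))

	int main(void)
	{
		/* per_cpu_old(counter, 0) = 1;  -- rejected: comma expression is not an lvalue */
		per_cpu_new(counter, 0) = 1;	/* *(&var) is an lvalue */
		return per_cpu_new(counter, 0);
	}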
...
...
include/asm-ia64/processor.h
View file @
3dbdb149
...
...
@@ -294,7 +294,7 @@ struct thread_struct {
 	.on_ustack =	0,			\
	.ksp =		0,			\
	.map_base =	DEFAULT_MAP_BASE,	\
-	.rbs_bot =	DEFAULT_USER_STACK_SIZE,		\
+	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,	\
	.last_fph_cpu = -1,			\
	INIT_THREAD_IA32			\
...