nexedi / linux

commit 8beb1642
Author: David Mosberger
Date:   Jul 29, 2002

    ia64: Manual merge.

Parents: 12ebbff8, cb1a895f

Showing 59 changed files, with 1899 additions and 901 deletions (+1899, -901)
arch/ia64/Makefile                        +1    -1
arch/ia64/config.in                       +43   -71
arch/ia64/hp/common/sba_iommu.c           +156  -329
arch/ia64/hp/sim/hpsim_console.c          +6    -6
arch/ia64/hp/sim/hpsim_irq.c              +8    -8
arch/ia64/hp/sim/simserial.c              +1    -0
arch/ia64/hp/zx1/hpzx1_machvec.c          +0    -2
arch/ia64/ia32/binfmt_elf32.c             +1    -1
arch/ia64/kernel/acpi.c                   +64   -57
arch/ia64/kernel/efi.c                    +123  -24
arch/ia64/kernel/init_task.c              +2    -2
arch/ia64/kernel/iosapic.c                +21   -22
arch/ia64/kernel/irq_ia64.c               +10   -3
arch/ia64/kernel/irq_lsapic.c             +8    -8
arch/ia64/kernel/machvec.c                +6    -3
arch/ia64/kernel/mca.c                    +12   -12
arch/ia64/kernel/mca_asm.S                +2    -2
arch/ia64/kernel/pci.c                    +26   -1
arch/ia64/kernel/perfmon.c                +61   -56
arch/ia64/kernel/perfmon_itanium.h        +99   -0
arch/ia64/kernel/perfmon_mckinley.h       +134  -0
arch/ia64/kernel/process.c                +5    -0
arch/ia64/kernel/setup.c                  +16   -5
arch/ia64/kernel/signal.c                 +1    -0
arch/ia64/kernel/smpboot.c                +2    -2
arch/ia64/kernel/time.c                   +15   -14
arch/ia64/kernel/traps.c                  +32   -9
arch/ia64/kernel/unwind.c                 +10   -9
arch/ia64/lib/Makefile                    +4    -4
arch/ia64/lib/copy_user.S                 +5    -3
arch/ia64/lib/io.c                        +9    -0
arch/ia64/lib/memcpy_mck.S                +674  -0
arch/ia64/lib/swiotlb.c                   +15   -15
arch/ia64/mm/init.c                       +17   -4
arch/ia64/mm/tlb.c                        +10   -5
arch/ia64/sn/io/ifconfig_net.c            +3    -3
arch/ia64/sn/io/pciba.c                   +14   -14
arch/ia64/sn/io/sn1/hubcounters.c         +1    -1
arch/ia64/sn/io/sn1/pcibr.c               +16   -16
arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c     +16   -16
arch/ia64/sn/kernel/setup.c               +11   -11
include/asm-ia64/bitops.h                 +1    -1
include/asm-ia64/delay.h                  +1    -1
include/asm-ia64/keyboard.h               +2    -0
include/asm-ia64/kregs.h                  +9    -0
include/asm-ia64/machvec.h                +1    -0
include/asm-ia64/machvec_init.h           +1    -0
include/asm-ia64/mmu_context.h            +24   -5
include/asm-ia64/offsets.h                +0    -130
include/asm-ia64/pci.h                    +1    -1
include/asm-ia64/perfmon.h                +2    -3
include/asm-ia64/processor.h              +3    -7
include/asm-ia64/scatterlist.h            +3    -3
include/asm-ia64/softirq.h                +1    -0
include/asm-ia64/suspend.h                +0    -0
include/asm-ia64/system.h                 +3    -1
include/asm-ia64/tlb.h                    +179  -4
include/asm-ia64/tlbflush.h               +4    -6
include/asm-ia64/unistd.h                 +4    -0
arch/ia64/Makefile

@@ -26,7 +26,7 @@ CFLAGS_KERNEL := -mconstant-gp
 GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')

 ifneq ($(GCC_VERSION),2)
-	CFLAGS += -frename-registers --param max-inline-insns=2000
+	CFLAGS += -frename-registers --param max-inline-insns=5000
 endif

 ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
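An aside on the hunk above: GCC_VERSION is derived by parsing the banner of $(CC) -v. A minimal shell illustration of the same pipeline, using echo with an assumed banner rather than a real compiler run:

    $ echo 'gcc version 3.1 20020508 (experimental)' | cut -f3 -d' ' | cut -f1 -d'.'
    3

The first cut selects the third space-separated field, '3.1'; the second keeps everything before the first dot, so the ifneq test sees only the major version.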
arch/ia64/config.in

@@ -64,12 +64,13 @@ if [ "$CONFIG_MCKINLEY" = "y" ]; then
 	fi
 fi

-if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ]; then
+if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
+then
 	bool '  Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
 	define_bool CONFIG_PM y
 fi

-if [ "$CONFIG_IA64_SGI_SN1" = "y" -o
-     "$CONFIG_IA64_SGI_SN2" = "y" ]; then
+if [ "$CONFIG_IA64_SGI_SN1" = "y" -o "$CONFIG_IA64_SGI_SN2" = "y" ]; then
 	define_bool CONFIG_IA64_SGI_SN y
 	bool '  Enable extra debugging code' CONFIG_IA64_SGI_SN_DEBUG n
 	bool '  Enable SGI Medusa Simulator Support' CONFIG_IA64_SGI_SN_SIM

@@ -99,21 +100,21 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
 tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC

 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 	source drivers/acpi/Config.in

 	bool 'PCI support' CONFIG_PCI
 	source drivers/pci/Config.in

 	bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
 	if [ "$CONFIG_HOTPLUG" = "y" ]; then
+		source drivers/hotplug/Config.in
 		source drivers/pcmcia/Config.in
 	else
 		define_bool CONFIG_PCMCIA n
 	fi

 	source drivers/parport/Config.in
 fi # !HP_SIM

 endmenu

@@ -124,38 +125,17 @@ fi
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 	source drivers/mtd/Config.in
 	source drivers/pnp/Config.in
 	source drivers/block/Config.in
 	source drivers/ieee1394/Config.in
 	source drivers/message/i2o/Config.in
 	source drivers/md/Config.in
 	source drivers/message/fusion/Config.in
-
-	mainmenu_option next_comment
-	comment 'ATA/ATAPI/MFM/RLL support'
-
-	tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
-
-	if [ "$CONFIG_IDE" != "n" ]; then
-		source drivers/ide/Config.in
-	else
-		define_bool CONFIG_BLK_DEV_HD n
-	fi
-	endmenu
-else # ! HP_SIM
-	mainmenu_option next_comment
-	comment 'Block devices'
-	tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
-	dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
-	tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
-	if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
-		int '  Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
-	fi
-	endmenu
 fi # !HP_SIM

 mainmenu_option next_comment
 comment 'SCSI support'

@@ -168,31 +148,26 @@ fi
 endmenu

 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 	if [ "$CONFIG_NET" = "y" ]; then
 		mainmenu_option next_comment
 		comment 'Network device support'
 		bool 'Network device support' CONFIG_NETDEVICES
 		if [ "$CONFIG_NETDEVICES" = "y" ]; then
 			source drivers/net/Config.in
 		fi
 		endmenu
 	fi
 	source net/ax25/Config.in
 	source drivers/isdn/Config.in
-	mainmenu_option next_comment
-	comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
-	bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
-	if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
-		source drivers/cdrom/Config.in
-	fi
-	endmenu
+	bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
 fi # !HP_SIM

 #

@@ -220,21 +195,18 @@ fi
 if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
 	mainmenu_option next_comment
 	comment 'Sound'
 	tristate 'Sound card support' CONFIG_SOUND
 	if [ "$CONFIG_SOUND" != "n" ]; then
 		source sound/Config.in
 	fi
 	endmenu
 	source drivers/usb/Config.in
 	source lib/Config.in
 	source net/bluetooth/Config.in
 fi # !HP_SIM

 if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
arch/ia64/hp/common/sba_iommu.c

@@ -2,6 +2,7 @@
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
 ** (c) Copyright 2002 Alex Williamson
+** (c) Copyright 2002 Grant Grundler
 ** (c) Copyright 2002 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)

@@ -110,7 +111,7 @@
 */
 #define DELAYED_RESOURCE_CNT	16

-#define DEFAULT_DMA_HINT_REG	0
+#define DEFAULT_DMA_HINT_REG(d)	0

 #define ZX1_FUNC_ID_VALUE ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
 #define ZX1_MC_ID ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)

@@ -216,9 +217,10 @@ static int sba_count;
 static int reserve_sba_gart = 1;
 static struct pci_dev sac_only_dev;

-#define sba_sg_iova(sg) (sg->address)
+#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
 #define sba_sg_len(sg) (sg->length)
-#define sba_sg_buffer(sg) (sg->orig_address)
+#define sba_sg_iova(sg) (sg->dma_address)
+#define sba_sg_iova_len(sg) (sg->dma_length)

 /* REVISIT - fix me for multiple SBAs/IOCs */
 #define GET_IOC(dev) (sba_list->ioc)
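Orientation for the rest of this diff: the hunk above is where the driver switches from the 2.4 scatterlist fields (address, orig_address) to the 2.5 ones. Below is a standalone sketch of what the new accessors resolve to; the struct is a simplified stand-in, not the kernel's definition, with a plain pointer in place of page_address((sg)->page):

    #include <stdio.h>

    struct scatterlist {                /* simplified stand-in */
    	char *page_vaddr;           /* replaces page_address(sg->page) here */
    	unsigned int offset;        /* buffer offset within that page       */
    	unsigned int length;        /* input: CPU buffer length             */
    	unsigned long dma_address;  /* output: IOVA for the device          */
    	unsigned int dma_length;    /* output: length of the DMA stream     */
    };

    #define sba_sg_address(sg)  ((sg)->page_vaddr + (sg)->offset)
    #define sba_sg_len(sg)      ((sg)->length)
    #define sba_sg_iova(sg)     ((sg)->dma_address)
    #define sba_sg_iova_len(sg) ((sg)->dma_length)

    int main(void)
    {
    	static char page[4096];
    	struct scatterlist sg = { page, 0x80, 256, 0, 0 };

    	/* bypass-style mapping: the device would use the CPU physical
    	 * address; we reuse the virtual one purely for illustration */
    	sba_sg_iova(&sg) = (unsigned long) sba_sg_address(&sg);
    	sba_sg_iova_len(&sg) = sba_sg_len(&sg);
    	printf("iova=%#lx len=%u\n", sba_sg_iova(&sg), sba_sg_iova_len(&sg));
    	return 0;
    }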
@@ -232,7 +234,7 @@ static struct pci_dev sac_only_dev;
 ** rather than the HW. I/O MMU allocation alogorithms can be
 ** faster with smaller size is (to some degree).
 */
-#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
+#define DMA_CHUNK_SIZE (BITS_PER_LONG*IOVP_SIZE)

 /* Looks nice and keeps the compiler happy */
 #define SBA_DEV(d) ((struct sba_device *) (d))

@@ -255,7 +257,7 @@ static struct pci_dev sac_only_dev;
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
- * Print the size/location of the IO MMU PDIR.
+ * Print the size/location of the IO MMU Pdir.
 */
 static void
 sba_dump_tlb(char *hpa)

@@ -273,12 +275,12 @@ sba_dump_tlb(char *hpa)
 #ifdef ASSERT_PDIR_SANITY

 /**
- * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
+ * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 * @pide: pdir index.
 *
- * Print one entry of the IO MMU PDIR in human readable form.
+ * Print one entry of the IO MMU Pdir in human readable form.
 */
 static void
 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)

@@ -360,25 +362,25 @@ sba_check_pdir(struct ioc *ioc, char *msg)
 * print the SG list so we can verify it's correct by hand.
 */
 static void
 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
 	while (nents-- > 0) {
-		printk(" %d : %08lx/%05x %p\n",
+		printk(" %d : DMA %08lx/%05x CPU %p\n",
 		       nents,
 		       (unsigned long) sba_sg_iova(startsg),
-		       sba_sg_len(startsg),
-		       sba_sg_buffer(startsg));
+		       sba_sg_iova_len(startsg),
+		       sba_sg_address(startsg));
 		startsg++;
 	}
 }

 static void
 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
 	struct scatterlist *the_sg = startsg;
 	int the_nents = nents;

 	while (the_nents-- > 0) {
-		if (sba_sg_buffer(the_sg) == 0x0UL)
+		if (sba_sg_address(the_sg) == 0x0UL)
 			sba_dump_sg(NULL, startsg, nents);
 		the_sg++;
 	}
 }

@@ -404,7 +406,6 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
 #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))

-/* FIXME : review these macros to verify correctness and usage */
 #define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

 #define RESMAP_MASK(n)    ~(~0UL << (n))

@@ -412,7 +413,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 /**
- * sba_search_bitmap - find free space in IO PDIR resource bitmap
+ * sba_search_bitmap - find free space in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *

@@ -449,7 +450,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 		** We need the alignment to invalidate I/O TLB using
 		** SBA HW features in the unmap path.
 		*/
-		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
+		unsigned long o = 1 << get_order(bits_wanted << IOVP_SHIFT);
 		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
 		unsigned long mask;

@@ -495,7 +496,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 /**
- * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
+ * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *

@@ -557,7 +558,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 /**
- * sba_free_range - unmark bits in IO PDIR resource bitmap
+ * sba_free_range - unmark bits in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for

@@ -604,14 +605,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 /**
- * sba_io_pdir_entry - fill in one IO PDIR entry
- * @pdir_ptr: pointer to IO PDIR entry
- * @vba: Virtual CPU address of buffer to map
+ * sba_io_pdir_entry - fill in one IO Pdir entry
+ * @pdir_ptr: pointer to IO Pdir entry
+ * @phys_page: phys CPU address of page to map
 *
 * SBA Mapping Routine
 *
- * Given a virtual address (vba, arg1) sba_io_pdir_entry()
- * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
+ * Given a physical address (phys_page, arg1) sba_io_pdir_entry()
+ * loads the I/O Pdir entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *

@@ -623,20 +624,12 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 * V  == Valid Bit
 * U  == Unused
 * PPN == Physical Page Number
-*
-* The physical address fields are filled with the results of virt_to_phys()
-* on the vba.
 */

-#if 1
-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL)
-#else
-void SBA_INLINE
-sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
-{
-	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
-}
-#endif
+#define SBA_VALID_MASK	0x80000000000000FFULL
+#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK)
+#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK)
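A standalone restatement of the new encoding, since the single mask does double duty: bit 63 is the valid bit, the low byte carries the enabled attribute bits, and the physical page number sits in between. Values here are invented; the two macros mirror the ones just added:

    #include <stdio.h>
    #include <stdint.h>

    #define SBA_VALID_MASK 0x80000000000000FFULL
    #define sba_io_pdir_entry(pdir_ptr, phys_page) (*(pdir_ptr) = ((phys_page) | SBA_VALID_MASK))
    #define sba_io_page(pdir_ptr) (*(pdir_ptr) & ~SBA_VALID_MASK)

    int main(void)
    {
    	uint64_t entry;
    	uint64_t phys_page = 0x4000ULL;	/* an assumed page-aligned address */

    	sba_io_pdir_entry(&entry, phys_page);	/* set page, valid, attrs  */
    	printf("entry = %#llx\n", (unsigned long long) entry);
    	printf("page  = %#llx\n", (unsigned long long) sba_io_page(&entry));
    	return 0;
    }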
 #ifdef ENABLE_MARK_CLEAN
 /**

@@ -660,12 +653,12 @@ mark_clean (void *addr, size_t size)
 #endif

 /**
- * sba_mark_invalid - invalidate one or more IO PDIR entries
+ * sba_mark_invalid - invalidate one or more IO Pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
- * Marking the IO PDIR entry(ies) as Invalid and invalidate
+ * Marking the IO Pdir entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *

@@ -700,14 +693,14 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

 		/*
-		** clear I/O PDIR entry "valid" bit
+		** clear I/O Pdir entry "valid" bit
 		** Do NOT clear the rest - save it for debugging.
 		** We should only clear bits that have previously
 		** been enabled.
 		*/
-		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+		ioc->pdir_base[off] &= ~SBA_VALID_MASK;
 	} else {
-		u32 t = get_order(byte_cnt) + PAGE_SHIFT;
+		u32 t = get_order(byte_cnt) + IOVP_SHIFT;

 		iovp |= t;
 		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

@@ -716,7 +709,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 			/* verify this pdir entry is enabled */
 			ASSERT(ioc->pdir_base[off] >> 63);
 			/* clear I/O Pdir entry "valid" bit first */
-			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+			ioc->pdir_base[off] &= ~SBA_VALID_MASK;
 			off++;
 			byte_cnt -= IOVP_SIZE;
 		} while (byte_cnt > 0);

@@ -744,7 +737,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	u64 *pdir_start;
 	int pide;
 #ifdef ALLOW_IOV_BYPASS
-	unsigned long pci_addr = virt_to_phys(addr);
+	unsigned long phys_addr = virt_to_phys(addr);
 #endif

 	ioc = GET_IOC(dev);

@@ -754,7 +747,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
 	*/
-	if ((pci_addr & ~dev->dma_mask) == 0) {
+	if ((phys_addr & ~dev->dma_mask) == 0) {
 		/*
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr

@@ -765,8 +758,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-			   dev->dma_mask, pci_addr);
-		return pci_addr;
+			   dev->dma_mask, phys_addr);
+		return phys_addr;
 	}
 #endif

@@ -799,7 +792,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	while (size > 0) {
 		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
-		sba_io_pdir_entry(pdir_start, (unsigned long) addr);
+		sba_io_pdir_entry(pdir_start, virt_to_phys(addr));

 		DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);

@@ -812,7 +806,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	sba_check_pdir(ioc,"Check after sba_map_single()");
 #endif
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
-	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
+	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction));
 }
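For readers tracing addresses: the value returned above is composed by the SBA_IOVA() macro shown earlier, which ORs together the window base, the pdir slot, the in-page offset, and a hint field (always 0 here, since DEFAULT_DMA_HINT_REG(d) expands to 0). A self-contained sketch with invented parameters:

    #include <stdio.h>
    #include <stdint.h>

    /* invented example parameters; the real ones live in struct ioc */
    #define IBASE      0x80000000ULL            /* IOVA window base (IOC_IBASE) */
    #define HINT_SHIFT 28                       /* stands in for hint_shift_pdir */
    #define HINT_MASK  (~(0x3ULL << HINT_SHIFT))  /* stands in for hint_mask_pdir */

    static uint64_t sba_iova(uint64_t iovp, uint64_t offset, uint64_t hint)
    {
    	return IBASE | iovp | offset | (hint << HINT_SHIFT);
    }

    static uint64_t sba_iovp(uint64_t iova)	/* the inverse, as in SBA_IOVP() */
    {
    	return (iova & HINT_MASK) & ~IBASE;
    }

    int main(void)
    {
    	uint64_t iova = sba_iova(0x42000, 0x123, 0);
    	printf("iova=%#llx -> iovp=%#llx\n",
    	       (unsigned long long) iova, (unsigned long long) sba_iovp(iova));
    	return 0;
    }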
 /**

@@ -866,6 +860,29 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	size += offset;
 	size = ROUNDUP(size, IOVP_SIZE);

+#ifdef ENABLE_MARK_CLEAN
+	/*
+	** Don't need to hold the spinlock while telling VM pages are "clean".
+	** The pages are "busy" in the resource map until we mark them free.
+	** But tell VM pages are clean *before* releasing the resource
+	** in order to avoid race conditions.
+	*/
+	if (direction == PCI_DMA_FROMDEVICE) {
+		u32 iovp = (u32) SBA_IOVP(ioc,iova);
+		unsigned int pide = PDIR_INDEX(iovp);
+		u64 *pdirp = &(ioc->pdir_base[pide]);
+		size_t byte_cnt = size;
+		void *addr;
+
+		do {
+			addr = phys_to_virt(sba_io_page(pdirp));
+			mark_clean(addr, min(byte_cnt, IOVP_SIZE));
+			pdirp++;
+			byte_cnt -= IOVP_SIZE;
+		} while (byte_cnt > 0);
+	}
+#endif
 	spin_lock_irqsave(&ioc->res_lock, flags);
 #ifdef CONFIG_PROC_FS
 	ioc->usingle_calls++;

@@ -891,40 +908,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	sba_free_range(ioc, iova, size);
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (direction == PCI_DMA_FROMDEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc,iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= IOVP_SIZE) {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-					    ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] &
-						    ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, IOVP_SIZE));
-				off++;
-				byte_cnt -= IOVP_SIZE;
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
-	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
-	** For Astro based systems this isn't a big deal WRT performance.
-	** As long as 2.4 kernels copyin/copyout data from/to userspace,
-	** we don't need the syncdma. The issue here is I/O MMU cachelines
-	** are *not* coherent in all cases. May be hwrev dependent.
-	** Need to investigate more.
-	asm volatile("syncdma");
-	*/
 }
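The unmap path above leans on one idiom worth spelling out: the size is first grown by the in-page offset, then rounded up to whole IO pages before the range is released or marked clean. A tiny standalone check of that arithmetic, assuming 4 KiB IO pages:

    #include <stdio.h>

    #define IOVP_SIZE 4096UL
    #define ROUNDUP(x, sz) (((x) + ((sz) - 1)) & ~((sz) - 1))

    int main(void)
    {
    	unsigned long offset = 0x123;	/* offset into the first IO page */
    	unsigned long size = 5000;	/* bytes passed in by the caller  */

    	size += offset;			/* cover the leading partial page */
    	size = ROUNDUP(size, IOVP_SIZE);
    	printf("%lu bytes -> %lu IO pages\n", size, size / IOVP_SIZE); /* 2 */
    	return 0;
    }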
@@ -980,242 +964,109 @@ void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
 }

-/*
-** Since 0 is a valid pdir_base index value, can't use that
-** to determine if a value is valid or not. Use a flag to indicate
-** the SG list entry contains a valid pdir index.
-*/
-#define PIDE_FLAG 0x1UL
-
 #ifdef DEBUG_LARGE_SG_ENTRIES
 int dump_run_sg = 0;
 #endif

-/**
- * sba_fill_pdir - write allocated SG entries into IO PDIR
- * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg: list of IOVA/size pairs
- * @nents: number of entries in startsg list
- *
- * Take preprocessed SG list and write corresponding entries
- * in the IO PDIR.
- */
-static SBA_INLINE int
-sba_fill_pdir(
-	struct ioc *ioc,
-	struct scatterlist *startsg,
-	int nents)
-{
-	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
-	int n_mappings = 0;
-	u64 *pdirp = 0;
-	unsigned long dma_offset = 0;
-
-	dma_sg--;
-	while (nents-- > 0) {
-		int cnt = sba_sg_len(startsg);
-		sba_sg_len(startsg) = 0;
-
-#ifdef DEBUG_LARGE_SG_ENTRIES
-		if (dump_run_sg)
-			printk(" %2d : %08lx/%05x %p\n",
-				nents,
-				(unsigned long) sba_sg_iova(startsg), cnt,
-				sba_sg_buffer(startsg));
-#else
-		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
-				nents,
-				(unsigned long) sba_sg_iova(startsg), cnt,
-				sba_sg_buffer(startsg));
-#endif
-
-		/*
-		** Look for the start of a new DMA stream
-		*/
-		if ((u64)sba_sg_iova(startsg) & PIDE_FLAG) {
-			u32 pide = (u64)sba_sg_iova(startsg) & ~PIDE_FLAG;
-			dma_offset = (unsigned long) pide & ~IOVP_MASK;
-			sba_sg_iova(startsg) = 0;
-			dma_sg++;
-			sba_sg_iova(dma_sg) = (char *)(pide | ioc->ibase);
-			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
-			n_mappings++;
-		}
-
-		/*
-		** Look for a VCONTIG chunk
-		*/
-		if (cnt) {
-			unsigned long vaddr = (unsigned long) sba_sg_buffer(startsg);
-			ASSERT(pdirp);
-
-			/* Since multiple Vcontig blocks could make up
-			** one DMA stream, *add* cnt to dma_len.
-			*/
-			sba_sg_len(dma_sg) += cnt;
-			cnt += dma_offset;
-			dma_offset = 0;	/* only want offset on first chunk */
-			cnt = ROUNDUP(cnt, IOVP_SIZE);
-#ifdef CONFIG_PROC_FS
-			ioc->msg_pages += cnt >> IOVP_SHIFT;
-#endif
-			do {
-				sba_io_pdir_entry(pdirp, vaddr);
-				vaddr += IOVP_SIZE;
-				cnt -= IOVP_SIZE;
-				pdirp++;
-			} while (cnt > 0);
-		}
-		startsg++;
-	}
-#ifdef DEBUG_LARGE_SG_ENTRIES
-	dump_run_sg = 0;
-#endif
-	return (n_mappings);
-}
-
-/*
-** Two address ranges are DMA contiguous *iff* "end of prev" and
-** "start of next" are both on a page boundry.
-**
-** (shift left is a quick trick to mask off upper bits)
-*/
-#define DMA_CONTIG(__X, __Y) \
-	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
+#define SG_ENT_VIRT_PAGE(sg) page_address((sg)->page)
+#define SG_ENT_PHYS_PAGE(SG) virt_to_phys(SG_ENT_VIRT_PAGE(SG))

 /**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg: list of IOVA/size pairs
+ * @startsg: input=SG list	output=DMA addr/len pairs filled in
 * @nents: number of entries in startsg list
+ * @direction: R/W or both.
 *
- * First pass is to walk the SG list and determine where the breaks are
- * in the DMA stream. Allocates PDIR entries but does not fill them.
- * Returns the number of DMA chunks.
+ * Walk the SG list and determine where the breaks are in the DMA stream.
+ * Allocate IO Pdir resources and fill them in separate loop.
+ * Returns the number of DMA streams used for output IOVA list.
+ * Note each DMA stream can consume multiple IO Pdir entries.
 *
- * Doing the fill seperate from the coalescing/allocation keeps the
- * code simpler. Future enhancement could make one pass through
- * the sglist do both.
+ * Code is written assuming some coalescing is possible.
 */
 static SBA_INLINE int
-sba_coalesce_chunks( struct ioc *ioc,
-	struct scatterlist *startsg,
-	int nents)
+sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
+	int nents, int direction)
 {
-	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
-	unsigned long vcontig_len;         /* len of VCONTIG chunk */
-	unsigned long vcontig_end;
-	struct scatterlist *dma_sg;        /* next DMA stream head */
-	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+	struct scatterlist *dma_sg = startsg;	/* return array */
 	int n_mappings = 0;

-	while (nents > 0) {
-		unsigned long vaddr = (unsigned long) (startsg->address);
+	ASSERT(nents > 1);
+
+	do {
+		unsigned int dma_cnt = 1;	/* number of pages in DMA stream */
+		unsigned int pide;	/* index into IO Pdir array */
+		u64 *pdirp;		/* pointer into IO Pdir array */
+		unsigned long dma_offset, dma_len; /* cumulative DMA stream */

 		/*
 		** Prepare for first/next DMA stream
 		*/
-		dma_sg = vcontig_sg = startsg;
-		dma_len = vcontig_len = vcontig_end = sba_sg_len(startsg);
-		vcontig_end += vaddr;
-		dma_offset = vaddr & ~IOVP_MASK;
-
-		/* PARANOID: clear entries */
-		sba_sg_buffer(startsg) = sba_sg_iova(startsg);
-		sba_sg_iova(startsg) = 0;
-		sba_sg_len(startsg) = 0;
+		dma_len = sba_sg_len(startsg);
+		dma_offset = sba_sg_address(startsg);
+		startsg++;
+		nents--;

 		/*
-		** This loop terminates one iteration "early" since
-		** it's always looking one "ahead".
+		** We want to know how many entries can be coalesced
+		** before trying to allocate IO Pdir space.
+		** IOVAs can then be allocated "naturally" aligned
+		** to take advantage of the block IO TLB flush.
 		*/
-		while (--nents > 0) {
-			unsigned long vaddr;	/* tmp */
-
-			startsg++;
-
-			/* catch brokenness in SCSI layer */
-			ASSERT(startsg->length <= DMA_CHUNK_SIZE);
+		while (nents) {
+			unsigned int end_offset = dma_offset + dma_len;

-			/*
-			** First make sure current dma stream won't
-			** exceed DMA_CHUNK_SIZE if we coalesce the
-			** next entry.
-			*/
-			if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK)
-			     & IOVP_MASK) > DMA_CHUNK_SIZE)
+			/* prev entry must end on a page boundary */
+			if (end_offset & IOVP_MASK)
 				break;

-			/*
-			** Then look for virtually contiguous blocks.
-			**
-			** append the next transaction?
-			*/
-			vaddr = (unsigned long) sba_sg_iova(startsg);
-			if (vcontig_end == vaddr)
-			{
-				vcontig_len += sba_sg_len(startsg);
-				vcontig_end += sba_sg_len(startsg);
-				dma_len     += sba_sg_len(startsg);
-				sba_sg_buffer(startsg) = (char *)vaddr;
-				sba_sg_iova(startsg) = 0;
-				sba_sg_len(startsg) = 0;
-				continue;
-			}
+			/* next entry start on a page boundary? */
+			if (startsg->offset)
+				break;

-#ifdef DEBUG_LARGE_SG_ENTRIES
-			dump_run_sg = (vcontig_len > IOVP_SIZE);
-#endif
-
 			/*
-			** Not virtually contigous.
-			** Terminate prev chunk.
-			** Start a new chunk.
-			**
-			** Once we start a new VCONTIG chunk, dma_offset
-			** can't change. And we need the offset from the first
-			** chunk - not the last one. Ergo Successive chunks
-			** must start on page boundaries and dove tail
-			** with it's predecessor.
+			** make sure current dma stream won't exceed
+			** DMA_CHUNK_SIZE if coalescing entries.
 			*/
-			sba_sg_len(vcontig_sg) = vcontig_len;
-
-			vcontig_sg = startsg;
-			vcontig_len = sba_sg_len(startsg);
-
-			/*
-			** 3) do the entries end/start on page boundaries?
-			** Don't update vcontig_end until we've checked.
-			*/
-			if (DMA_CONTIG(vcontig_end, vaddr))
-			{
-				vcontig_end = vcontig_len + vaddr;
-				dma_len += vcontig_len;
-				sba_sg_buffer(startsg) = (char *)vaddr;
-				sba_sg_iova(startsg) = 0;
-				continue;
-			} else {
-				break;
-			}
+			if (((end_offset + startsg->length + ~IOVP_MASK)
+			     & IOVP_MASK) > DMA_CHUNK_SIZE)
+				break;
+
+			dma_len += sba_sg_len(startsg);
+			startsg++;
+			nents--;
+			dma_cnt++;
 		}

-		/*
-		** End of DMA Stream
-		** Terminate last VCONTIG block.
-		** Allocate space for DMA stream.
-		*/
-		sba_sg_len(vcontig_sg) = vcontig_len;
-		dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
-		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		sba_sg_iova(dma_sg) = (char *) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
-			| dma_offset);
+		ASSERT(dma_len <= DMA_CHUNK_SIZE);
+
+		/* allocate IO Pdir resource.
+		** returns index into (u64) IO Pdir array.
+		** IOVA is formed from this.
+		*/
+		pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT);
+		pdirp = &(ioc->pdir_base[pide]);
+
+		/* fill_pdir: write stream into IO Pdir */
+		while (dma_cnt--) {
+			sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg));
+			startsg++;
+			pdirp++;
+		}
+
+		/* "output" IOVA */
+		sba_sg_iova(dma_sg) = SBA_IOVA(ioc,
+					((dma_addr_t) pide << IOVP_SHIFT),
+					dma_offset,
+					DEFAULT_DMA_HINT_REG(direction));
+		sba_sg_iova_len(dma_sg) = dma_len;
+
+		dma_sg++;
 		n_mappings++;
-	}
+	} while (nents);

 	return n_mappings;
 }
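The rewritten coalescing loop applies three stream-break rules before touching the allocator: the running stream must end on an IO page boundary, the next entry must start on one, and the grown stream must still fit one DMA_CHUNK_SIZE allocation. A standalone restatement of just those rules, with 4 KiB IO pages assumed and the kernel's IOVP_MASK conventions simplified:

    #include <stdio.h>

    #define IOVP_SHIFT     12
    #define IOVP_SIZE      (1UL << IOVP_SHIFT)
    #define OFFSET_MASK    (IOVP_SIZE - 1)
    #define DMA_CHUNK_SIZE (64 * IOVP_SIZE)  /* BITS_PER_LONG * IOVP_SIZE */

    /* May the next SG entry join the current DMA stream? */
    static int can_coalesce(unsigned long stream_end, unsigned long stream_len,
    			unsigned long next_offset, unsigned long next_len)
    {
    	if (stream_end & OFFSET_MASK)	/* prev must end on a page boundary   */
    		return 0;
    	if (next_offset)		/* next must start on a page boundary */
    		return 0;
    	/* the grown stream must still fit one DMA_CHUNK_SIZE allocation */
    	if (((stream_len + next_len + OFFSET_MASK) & ~OFFSET_MASK) > DMA_CHUNK_SIZE)
    		return 0;
    	return 1;
    }

    int main(void)
    {
    	printf("%d\n", can_coalesce(2 * IOVP_SIZE, IOVP_SIZE, 0, IOVP_SIZE));     /* 1 */
    	printf("%d\n", can_coalesce(2 * IOVP_SIZE + 0x100, 0x100, 0, IOVP_SIZE)); /* 0 */
    	return 0;
    }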
@@ -1223,7 +1074,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 /**
 * sba_map_sg - map Scatter/Gather list
- * @dev: instance of PCI owned by the driver that's asking.
+ * @dev: instance of PCI device owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both.

@@ -1234,42 +1085,46 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 	int direction)
 {
 	struct ioc *ioc;
-	int coalesced, filled = 0;
+	int filled = 0;
 	unsigned long flags;
 #ifdef ALLOW_IOV_BYPASS
 	struct scatterlist *sg;
 #endif

-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents,
+		   sba_sg_address(sglist), sba_sg_len(sglist));
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

 #ifdef ALLOW_IOV_BYPASS
 	if (dev->dma_mask >= ioc->dma_mask) {
 		for (sg = sglist ; filled < nents ; filled++, sg++) {
-			sba_sg_buffer(sg) = sba_sg_iova(sg);
-			sba_sg_iova(sg) = (char *) virt_to_phys(sba_sg_buffer(sg));
+			sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg));
+			sba_sg_iova_len(sg) = sba_sg_len(sg);
 		}
 #ifdef CONFIG_PROC_FS
 		spin_lock_irqsave(&ioc->res_lock, flags);
 		ioc->msg_bypass++;
 		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
+		DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled);
 		return filled;
 	}
 #endif
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
-		sba_sg_buffer(sglist) = sba_sg_iova(sglist);
 		sba_sg_iova(sglist) = (char *) sba_map_single(dev,
-						sba_sg_buffer(sglist),
+						sba_sg_iova(sglist),
 						sba_sg_len(sglist), direction);
+		sba_sg_iova_len(sglist) = sba_sg_len(sglist);
 #ifdef CONFIG_PROC_FS
 		/*
 		** Should probably do some stats counting, but trying to
 		** be precise quickly starts wasting CPU time.
 		*/
 #endif
 		DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__);
 		return 1;
 	}
@@ -1286,26 +1141,11 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 #ifdef CONFIG_PROC_FS
 	ioc->msg_calls++;
 #endif

-	/*
-	** First coalesce the chunks and allocate I/O pdir space
-	**
-	** If this is one DMA stream, we can properly map using the
-	** correct virtual address associated with each DMA page.
-	** w/o this association, we wouldn't have coherent DMA!
-	** Access to the virtual address is what forces a two pass algorithm.
-	*/
-	coalesced = sba_coalesce_chunks(ioc, sglist, nents);
-
 	/*
-	** Program the I/O Pdir
-	**
-	** map the virtual addresses to the I/O Pdir
-	** o dma_address will contain the pdir index
-	** o dma_len will contain the number of bytes to map
-	** o address contains the virtual address.
+	** coalesce and program the I/O Pdir
 	*/
-	filled = sba_fill_pdir(ioc, sglist, nents);
+	filled = sba_coalesce_chunks(ioc, sglist, nents, direction);

 #ifdef ASSERT_PDIR_SANITY
 	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))

@@ -1317,7 +1157,6 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 	spin_unlock_irqrestore(&ioc->res_lock, flags);

-	ASSERT(coalesced == filled);
 	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

 	return filled;

@@ -1341,8 +1180,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 	unsigned long flags;
 #endif

-	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		   __FUNCTION__, nents, sba_sg_buffer(sglist), sglist->length);
+	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n",
+		   __FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist));

 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

@@ -1360,7 +1199,7 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 	while (sba_sg_len(sglist) && nents--) {

 		sba_unmap_single(dev, (dma_addr_t) sba_sg_iova(sglist),
-				 sba_sg_len(sglist), direction);
+				 sba_sg_iova_len(sglist), direction);
 #ifdef CONFIG_PROC_FS
 		/*
 		** This leaves inconsistent data in the stats, but we can't

@@ -1368,7 +1207,7 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 		** were coalesced to a single entry. The stats are fun,
 		** but speed is more important.
 		*/
-		ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
+		ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT;
 #endif
 		++sglist;
 	}
@@ -1429,12 +1268,12 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 		__FUNCTION__, ioc->ioc_hpa, iova_space_size>>20,
 		iov_order + PAGE_SHIFT, ioc->pdir_size);

-	/* FIXME : DMA HINTs not used */
+	/* XXX DMA HINTs not used */
 	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
 	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

 	ioc->pdir_base =
 	pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
 	if (NULL == pdir_base)
 	{
 		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);

@@ -1452,20 +1291,8 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 	/* build IMASK for IOC and Elroy */
 	iova_space_mask = 0xffffffff;
-	iova_space_mask <<= (iov_order + PAGE_SHIFT);
+	iova_space_mask <<= (iov_order + IOVP_SHIFT);

-#ifdef CONFIG_IA64_HP_PROTO
-	/*
-	** REVISIT - this is a kludge, but we won't be supporting anything but
-	** zx1 2.0 or greater for real. When fw is in shape, ibase will
-	** be preprogrammed w/ the IOVA hole base and imask will give us
-	** the size.
-	*/
-	if ((sba_dev->hw_rev & 0xFF) < 0x20) {
-		DBG_INIT("%s() Found SBA rev < 2.0, setting IOVA base to 0. This device will not be supported in the future.\n", __FUNCTION__);
-		ioc->ibase = 0x0;
-	} else
-#endif
 	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL;

 	ioc->imask = iova_space_mask;	/* save it */

@@ -1474,7 +1301,7 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 		__FUNCTION__, ioc->ibase, ioc->imask);

 	/*
-	** FIXME: Hint registers are programmed with default hint
+	** XXX DMA HINT registers are programmed with default hint
 	** values during boot, so hints should be sane even if we
 	** can't reprogram them the way drivers want.
 	*/

@@ -1487,8 +1314,8 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 	*/
 	ioc->imask |= 0xFFFFFFFF00000000UL;

-	/* Set I/O PDIR Page size to system page size */
-	switch (PAGE_SHIFT) {
+	/* Set I/O Pdir page size to system page size */
+	switch (IOVP_SHIFT) {
 		case 12: /* 4K */
 			tcnfg = 0;
 			break;
@@ -1636,7 +1463,7 @@ sba_common_init(struct sba_device *sba_dev)
 			res_word = (int)(index / BITS_PER_LONG);
 			mask = 0x1UL << (index - (res_word * BITS_PER_LONG));
 			res_ptr[res_word] |= mask;
-			sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (0x80000000000000FFULL | reserved_iov);
+			sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov);
 		}
 	}
arch/ia64/hp/sim/hpsim_console.c

@@ -30,12 +30,12 @@ static void simcons_write (struct console *, const char *, unsigned);
 static kdev_t simcons_console_device (struct console *);

 struct console hpsim_cons = {
-	name:		"simcons",
-	write:		simcons_write,
-	device:		simcons_console_device,
-	setup:		simcons_init,
-	flags:		CON_PRINTBUFFER,
-	index:		-1,
+	.name =		"simcons",
+	.write =	simcons_write,
+	.device =	simcons_console_device,
+	.setup =	simcons_init,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
 };

 static int
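The console and irq-type conversions in this commit follow one pattern: gcc's old "label:" initializer extension gives way to C99 designated initializers. A minimal illustration with an invented struct:

    #include <stdio.h>

    struct ops {
    	const char *name;
    	int (*probe)(void);
    };

    static int noop_probe(void) { return 0; }

    /* old, gcc-only spelling:  { name: "demo", probe: noop_probe }  */
    /* new C99 spelling, as in the hunks above and below:            */
    static struct ops demo_ops = {
    	.name  = "demo",
    	.probe = noop_probe,
    };

    int main(void)
    {
    	printf("%s -> %d\n", demo_ops.name, demo_ops.probe());
    	return 0;
    }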
arch/ia64/hp/sim/hpsim_irq.c

@@ -22,14 +22,14 @@ hpsim_irq_noop (unsigned int irq)
 }

 static struct hw_interrupt_type irq_type_hp_sim = {
-	typename:	"hpsim",
-	startup:	hpsim_irq_startup,
-	shutdown:	hpsim_irq_noop,
-	enable:		hpsim_irq_noop,
-	disable:	hpsim_irq_noop,
-	ack:		hpsim_irq_noop,
-	end:		hpsim_irq_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
+	.typename =	"hpsim",
+	.startup =	hpsim_irq_startup,
+	.shutdown =	hpsim_irq_noop,
+	.enable =	hpsim_irq_noop,
+	.disable =	hpsim_irq_noop,
+	.ack =		hpsim_irq_noop,
+	.end =		hpsim_irq_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
 };

 void __init
arch/ia64/hp/sim/simserial.c

@@ -31,6 +31,7 @@
 #include <linux/serialP.h>

 #include <asm/irq.h>
+#include <asm/hw_irq.h>
 #include <asm/uaccess.h>

 #ifdef CONFIG_KDB
arch/ia64/hp/zx1/hpzx1_machvec.c

 #define MACHVEC_PLATFORM_NAME	hpzx1
 #include <asm/machvec_init.h>
-#define MACHVEC_PLATFORM_NAME	hpzx1
-#include <asm/machvec_init.h>
arch/ia64/ia32/binfmt_elf32.c

@@ -67,7 +67,7 @@ ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int
 }

 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	nopage: ia32_install_shared_page
+	.nopage = ia32_install_shared_page
 };

 void
arch/ia64/kernel/acpi.c

@@ -56,6 +56,8 @@ asm (".weak iosapic_version");
 void (*pm_idle) (void);
 void (*pm_power_off) (void);

+unsigned char acpi_kbd_controller_present = 1;
+
 const char *
 acpi_get_sysname (void)
 {

@@ -206,7 +208,7 @@ struct acpi_table_madt * acpi_madt __initdata;
 static int __init
 acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_addr_ovr *lapic = NULL;
+	struct acpi_table_lapic_addr_ovr *lapic;

 	lapic = (struct acpi_table_lapic_addr_ovr *) header;
 	if (!lapic)

@@ -226,7 +228,7 @@ acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lsapic (acpi_table_entry_header *header)
 {
-	struct acpi_table_lsapic *lsapic = NULL;
+	struct acpi_table_lsapic *lsapic;

 	lsapic = (struct acpi_table_lsapic *) header;
 	if (!lsapic)

@@ -262,7 +264,7 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_nmi *lacpi_nmi = NULL;
+	struct acpi_table_lapic_nmi *lacpi_nmi;

 	lacpi_nmi = (struct acpi_table_lapic_nmi *) header;
 	if (!lacpi_nmi)

@@ -279,7 +281,7 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 static int __init
 acpi_find_iosapic (int global_vector, u32 *irq_base, char **iosapic_address)
 {
-	struct acpi_table_iosapic *iosapic = NULL;
+	struct acpi_table_iosapic *iosapic;
 	int ver = 0;
 	int max_pin = 0;
 	char *p = 0;

@@ -338,7 +340,7 @@ acpi_parse_iosapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_plat_int_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_plat_int_src *plintsrc = NULL;
+	struct acpi_table_plat_int_src *plintsrc;
 	int vector = 0;
 	u32 irq_base = 0;
 	char *iosapic_address = NULL;

@@ -381,7 +383,7 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
 static int __init
 acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_int_src_ovr *p = NULL;
+	struct acpi_table_int_src_ovr *p;

 	p = (struct acpi_table_int_src_ovr *) header;
 	if (!p)

@@ -404,7 +406,7 @@ acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_nmi_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_nmi_src *nmi_src = NULL;
+	struct acpi_table_nmi_src *nmi_src;

 	nmi_src = (struct acpi_table_nmi_src *) header;
 	if (!nmi_src)

@@ -425,10 +427,6 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 		return -EINVAL;

 	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
-	if (!acpi_madt) {
-		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-		return -ENODEV;
-	}

 	/* Get base address of IPI Message Block */
@@ -442,6 +440,28 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 }

+static int __init
+acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+{
+	struct acpi_table_header *fadt_header;
+	fadt_descriptor_rev2 *fadt;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+
+	if (fadt_header->revision != 3)
+		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
+
+	fadt = (fadt_descriptor_rev2 *) fadt_header;
+
+	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
+		acpi_kbd_controller_present = 0;
+
+	return 0;
+}
+
 int __init
 acpi_find_rsdp (unsigned long *rsdp_phys)
 {
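acpi_kbd_controller_present, introduced above, defaults to 1 and is cleared only when an ACPI 2.0 FADT's boot flags report no 8042. Elsewhere the commit touches include/asm-ia64/keyboard.h (+2 lines, not shown in this capture); the consumer sketched below is hypothetical, not that actual change:

    #include <stdio.h>

    static unsigned char acpi_kbd_controller_present = 1;  /* as in acpi.c */

    /* hypothetical consumer: skip legacy keyboard probing when the
     * firmware reports no 8042 controller */
    static int kbd_probe(void)
    {
    	if (!acpi_kbd_controller_present) {
    		printf("no 8042 per FADT boot flags, skipping probe\n");
    		return -1;
    	}
    	printf("probing 8042 keyboard controller...\n");
    	return 0;
    }

    int main(void)
    {
    	return kbd_probe() ? 1 : 0;
    }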
@@ -467,8 +487,8 @@ acpi_find_rsdp (unsigned long *rsdp_phys)
...
@@ -467,8 +487,8 @@ acpi_find_rsdp (unsigned long *rsdp_phys)
static
int
__init
static
int
__init
acpi_parse_spcr
(
unsigned
long
phys_addr
,
unsigned
long
size
)
acpi_parse_spcr
(
unsigned
long
phys_addr
,
unsigned
long
size
)
{
{
acpi_ser_t
*
spcr
=
NULL
;
acpi_ser_t
*
spcr
;
unsigned
long
global_int
=
0
;
unsigned
long
global_int
;
if
(
!
phys_addr
||
!
size
)
if
(
!
phys_addr
||
!
size
)
return
-
EINVAL
;
return
-
EINVAL
;
...
@@ -486,11 +506,6 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 	 */
 	spcr = (acpi_ser_t *) __va(phys_addr);
-	if (!spcr) {
-		printk(KERN_WARNING PREFIX "Unable to map SPCR\n");
-		return -ENODEV;
-	}
 	setup_serial_acpi(spcr);
 
 	if (spcr->length < sizeof(acpi_ser_t))
...
@@ -527,11 +542,11 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 int __init
 acpi_boot_init (char *cmdline)
 {
-	int result = 0;
+	int result;
 
 	/* Initialize the ACPI boot-time table parser */
 	result = acpi_table_init(cmdline);
-	if (0 != result)
+	if (result)
 		return result;
 	/*
...
@@ -542,57 +557,49 @@ acpi_boot_init (char *cmdline)
 	 * information -- the successor to MPS tables.
 	 */
 
-	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-	if (1 > result)
-		return result;
+	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		goto skip_madt;
+	}
 
 	/* Local APIC */
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries!\n");
-		return -ENODEV;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi) < 0)
+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
-	result = acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries!\n");
-		return ((result == 0) ? -ENODEV : result);
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
-	result = acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src) < 0)
+		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr) < 0)
+		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
 
-	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
-	if (0 > result) {
-		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-		return result;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src) < 0)
+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+  skip_madt:
+
+	/* FADT says whether a legacy keyboard controller is present. */
+	if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
+		printk(KERN_ERR PREFIX "Can't find FADT\n");
 #ifdef CONFIG_SERIAL_ACPI
 	/*
...
@@ -602,7 +609,7 @@ acpi_boot_init (char *cmdline)
 	 * serial ports, EC, SMBus, etc.
 	 */
 	acpi_table_parse(ACPI_SPCR, acpi_parse_spcr);
-#endif /*CONFIG_SERIAL_ACPI*/
+#endif
 
 #ifdef CONFIG_SMP
 	if (available_cpus == 0) {
...
@@ -625,9 +632,9 @@ acpi_boot_init (char *cmdline)
 int __init
 acpi_get_prt (struct pci_vector_struct **vectors, int *count)
 {
-	struct pci_vector_struct *vector = NULL;
-	struct list_head *node = NULL;
-	struct acpi_prt_entry *entry = NULL;
+	struct pci_vector_struct *vector;
+	struct list_head *node;
+	struct acpi_prt_entry *entry;
 	int i = 0;
 
 	if (!vectors || !count)
...
arch/ia64/kernel/efi.c
View file @
8beb1642
...
@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv)
...
@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv)
tv
->
tv_usec
=
tm
.
nanosecond
/
1000
;
tv
->
tv_usec
=
tm
.
nanosecond
/
1000
;
}
}
static
int
is_available_memory
(
efi_memory_desc_t
*
md
)
{
if
(
!
(
md
->
attribute
&
EFI_MEMORY_WB
))
return
0
;
switch
(
md
->
type
)
{
case
EFI_LOADER_CODE
:
case
EFI_LOADER_DATA
:
case
EFI_BOOT_SERVICES_CODE
:
case
EFI_BOOT_SERVICES_DATA
:
case
EFI_CONVENTIONAL_MEMORY
:
return
1
;
}
return
0
;
}
/*
* Trim descriptor MD so its starts at address START_ADDR. If the descriptor covers
* memory that is normally available to the kernel, issue a warning that some memory
* is being ignored.
*/
static
void
trim_bottom
(
efi_memory_desc_t
*
md
,
u64
start_addr
)
{
u64
num_skipped_pages
;
if
(
md
->
phys_addr
>=
start_addr
||
!
md
->
num_pages
)
return
;
num_skipped_pages
=
(
start_addr
-
md
->
phys_addr
)
>>
EFI_PAGE_SHIFT
;
if
(
num_skipped_pages
>
md
->
num_pages
)
num_skipped_pages
=
md
->
num_pages
;
if
(
is_available_memory
(
md
))
printk
(
KERN_NOTICE
"efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
"at 0x%lx
\n
"
,
__FUNCTION__
,
(
num_skipped_pages
<<
EFI_PAGE_SHIFT
)
>>
10
,
md
->
phys_addr
,
start_addr
-
IA64_GRANULE_SIZE
);
/*
* NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
* descriptor list to become unsorted. In such a case, md->num_pages will be
* zero, so the Right Thing will happen.
*/
md
->
phys_addr
+=
num_skipped_pages
<<
EFI_PAGE_SHIFT
;
md
->
num_pages
-=
num_skipped_pages
;
}
static
void
trim_top
(
efi_memory_desc_t
*
md
,
u64
end_addr
)
{
u64
num_dropped_pages
,
md_end_addr
;
md_end_addr
=
md
->
phys_addr
+
(
md
->
num_pages
<<
EFI_PAGE_SHIFT
);
if
(
md_end_addr
<=
end_addr
||
!
md
->
num_pages
)
return
;
num_dropped_pages
=
(
md_end_addr
-
end_addr
)
>>
EFI_PAGE_SHIFT
;
if
(
num_dropped_pages
>
md
->
num_pages
)
num_dropped_pages
=
md
->
num_pages
;
if
(
is_available_memory
(
md
))
printk
(
KERN_NOTICE
"efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
"at 0x%lx
\n
"
,
__FUNCTION__
,
(
num_dropped_pages
<<
EFI_PAGE_SHIFT
)
>>
10
,
md
->
phys_addr
,
end_addr
);
md
->
num_pages
-=
num_dropped_pages
;
}
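The trimming helpers above, and the granule search below, lean on IA64_GRANULE_SIZE being a power of two: ANDing an address with the size's negation rounds it down to a granule boundary, and adding size-1 first rounds it up, since for an unsigned power-of-two size, -SIZE equals ~(SIZE - 1). A minimal standalone sketch of the same idiom (the 16MB granule value here is an assumption for illustration, not the kernel's definition):

#include <stdio.h>
#include <stdint.h>

#define GRANULE_SIZE (16UL << 20)	/* assumed 16MB granule; must be a power of two */

int
main (void)
{
	uint64_t addr = 0x1234567UL;
	/* round up: adding (size - 1) carries into the granule bits before masking */
	uint64_t up   = (addr + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1);
	/* round down: just clear the low bits */
	uint64_t down = addr & ~(GRANULE_SIZE - 1);

	printf("0x%llx -> up 0x%llx, down 0x%llx\n", (unsigned long long) addr,
	       (unsigned long long) up, (unsigned long long) down);
	return 0;
}

The kernel spells the mask as -IA64_GRANULE_SIZE, which is the same bit pattern as ~(IA64_GRANULE_SIZE - 1) in two's complement.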
 /*
- * Walks the EFI memory map and calls CALLBACK once for each EFI
- * memory descriptor that has memory that is available for OS use.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for OS use.
  */
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
...
@@ -137,9 +207,9 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 		u64 start;
 		u64 end;
 	} prev, curr;
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size, start, end;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *check_md;
+	u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
...
@@ -147,24 +217,56 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
-		switch (md->type) {
-		      case EFI_LOADER_CODE:
-		      case EFI_LOADER_DATA:
-		      case EFI_BOOT_SERVICES_CODE:
-		      case EFI_BOOT_SERVICES_DATA:
-		      case EFI_CONVENTIONAL_MEMORY:
-			if (!(md->attribute & EFI_MEMORY_WB))
-				continue;
+
+		/* skip over non-WB memory descriptors; that's all we're interested in... */
+		if (!(md->attribute & EFI_MEMORY_WB))
+			continue;
+
+		if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
+			/*
+			 * Search for the next run of contiguous WB memory.  Start search
+			 * at first granule boundary covered by md.
+			 */
+			granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1) & -IA64_GRANULE_SIZE);
+			first_non_wb_addr = granule_addr;
+			for (q = p; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+
+				if (check_md->phys_addr < granule_addr)
+					continue;
+				if (!(check_md->attribute & EFI_MEMORY_WB))
+					break;	/* hit a non-WB region; stop search */
+				if (check_md->phys_addr != first_non_wb_addr)
+					break;	/* hit a memory hole; stop search */
+				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
+			}
+			/* round it down to the previous granule-boundary: */
+			first_non_wb_addr &= -IA64_GRANULE_SIZE;
+
+			if (!(first_non_wb_addr > granule_addr))
+				continue;	/* couldn't find enough contiguous memory */
+		}
+		/* BUG_ON((md->phys_addr >> IA64_GRANULE_SHIFT) < first_non_wb_addr); */
+
+		trim_bottom(md, granule_addr);
+		trim_top(md, first_non_wb_addr);
+
+		if (is_available_memory(md)) {
 			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
 				if (md->phys_addr > mem_limit)
 					continue;
 				md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
 			}
-			if (md->num_pages == 0) {
-				printk("efi_memmap_walk: ignoring empty region at 0x%lx",
-				       md->phys_addr);
+
+			if (md->num_pages == 0)
 				continue;
-			}
 
 			curr.start = PAGE_OFFSET + md->phys_addr;
 			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
...
@@ -187,10 +289,6 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 				prev = curr;
 			}
 		}
-			break;
-
-		      default:
-			continue;
-		}
 	}
 
 	if (prev_valid) {
...
@@ -268,8 +366,9 @@ efi_map_pal_code (void)
 		 */
 		psr = ia64_clear_ic();
 		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);
+			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			 IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
 		ia64_srlz_i();
 	}
 }
...
@@ -376,7 +475,7 @@ efi_init (void)
 		md = p;
 		printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
 		       i, md->type, md->attribute, md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
+		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 		       md->num_pages >> (20 - EFI_PAGE_SHIFT));
 	}
 }
...
arch/ia64/kernel/init_task.c
...
@@ -34,8 +34,8 @@ union init_thread {
 	} s;
 	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
 } init_thread_union __attribute__((section(".data.init_task"))) = {{
-	task:		INIT_TASK(init_thread_union.s.task),
-	thread_info:	INIT_THREAD_INFO(init_thread_union.s.thread_info)
+	.task =		INIT_TASK(init_thread_union.s.task),
+	.thread_info =	INIT_THREAD_INFO(init_thread_union.s.thread_info)
 }};
 
 asm (".global init_task; init_task = init_thread_union");
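This hunk, and the hw_interrupt_type, irqaction, and seq_operations hunks below, convert the old GNU-extension `field:` struct initializers to the C99 designated-initializer syntax. A minimal sketch of the two spellings (the struct and values are made up for illustration):

#include <stdio.h>

struct ops {
	const char *name;
	int (*handler)(int);
};

static int noop (int x) { return x; }

/* old GNU extension, understood by gcc only: */
static struct ops old_style = {
	name:		"noop",
	handler:	noop
};

/* C99 designated initializers, the portable spelling: */
static struct ops new_style = {
	.name =		"noop",
	.handler =	noop
};

int
main (void)
{
	printf("%s %s\n", old_style.name, new_style.name);
	return 0;
}

Both forms leave unnamed members zero-initialized and allow fields to be listed in any order; only the spelling changes.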
arch/ia64/kernel/iosapic.c
...
@@ -88,7 +88,7 @@ static struct {
 static struct iosapic_irq {
 	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
 	char		pin;		/* IOSAPIC pin (-1 => not an IOSAPIC irq) */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
 	unsigned char	polarity: 1;	/* interrupt polarity (see iosapic.h) */
...
@@ -97,9 +97,9 @@ static struct iosapic_irq {
 static struct iosapic {
 	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	pcat_compat;	/* 8259 compatibility flag */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
 	unsigned short	max_pin;	/* max input pin supported in this IOSAPIC */
+	unsigned char	pcat_compat;	/* 8259 compatibility flag */
 } iosapic_lists[256] __initdata;
 
 static int num_iosapic = 0;
...
@@ -322,14 +322,14 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq	nop
 
 struct hw_interrupt_type irq_type_iosapic_level = {
-	typename:	"IO-SAPIC-level",
-	startup:	iosapic_startup_level_irq,
-	shutdown:	iosapic_shutdown_level_irq,
-	enable:		iosapic_enable_level_irq,
-	disable:	iosapic_disable_level_irq,
-	ack:		iosapic_ack_level_irq,
-	end:		iosapic_end_level_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-level",
+	.startup =	iosapic_startup_level_irq,
+	.shutdown =	iosapic_shutdown_level_irq,
+	.enable =	iosapic_enable_level_irq,
+	.disable =	iosapic_disable_level_irq,
+	.ack =		iosapic_ack_level_irq,
+	.end =		iosapic_end_level_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 
 /*
...
@@ -366,14 +366,14 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq	nop
 
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	typename:	"IO-SAPIC-edge",
-	startup:	iosapic_startup_edge_irq,
-	shutdown:	iosapic_disable_edge_irq,
-	enable:		iosapic_enable_edge_irq,
-	disable:	iosapic_disable_edge_irq,
-	ack:		iosapic_ack_edge_irq,
-	end:		iosapic_end_edge_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-edge",
+	.startup =	iosapic_startup_edge_irq,
+	.shutdown =	iosapic_disable_edge_irq,
+	.enable =	iosapic_enable_edge_irq,
+	.disable =	iosapic_disable_edge_irq,
+	.ack =		iosapic_ack_edge_irq,
+	.end =		iosapic_end_edge_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 
 unsigned int
...
@@ -679,11 +679,10 @@ iosapic_init_pci_irq (void)
 			  pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
 			  iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
 #endif
 		/*
-		 * Forget not to program the IOSAPIC RTE per ACPI _PRT
+		 * NOTE: The IOSAPIC RTE will be programmed in iosapic_pci_fixup().  It
+		 * needs to be done there to ensure PCI hotplug works right.
 		 */
-		set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
 	}
 }
...
arch/ia64/kernel/irq_ia64.c
...
@@ -36,6 +36,10 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
 #define IRQ_DEBUG	0
 
 /* default base addr of IPI table */
...
@@ -144,9 +148,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
 
 static struct irqaction ipi_irqaction = {
-	handler:	handle_IPI,
-	flags:		SA_INTERRUPT,
-	name:		"IPI"
+	.handler =	handle_IPI,
+	.flags =	SA_INTERRUPT,
+	.name =		"IPI"
 };
 #endif
...
@@ -172,6 +176,9 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 #endif
+#ifdef CONFIG_PERFMON
+	perfmon_init_percpu();
+#endif
 	platform_irq_init();
 }
...
arch/ia64/kernel/irq_lsapic.c
...
@@ -27,12 +27,12 @@ lsapic_noop (unsigned int irq)
 }
 
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	typename:	"LSAPIC",
-	startup:	lsapic_noop_startup,
-	shutdown:	lsapic_noop,
-	enable:		lsapic_noop,
-	disable:	lsapic_noop,
-	ack:		lsapic_noop,
-	end:		lsapic_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) lsapic_noop
+	.typename =	"LSAPIC",
+	.startup =	lsapic_noop_startup,
+	.shutdown =	lsapic_noop,
+	.enable =	lsapic_noop,
+	.disable =	lsapic_noop,
+	.ack =		lsapic_noop,
+	.end =		lsapic_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) lsapic_noop
 };
arch/ia64/kernel/machvec.c
...
@@ -11,13 +11,16 @@
 struct ia64_machine_vector ia64_mv;
 
 /*
- * Most platforms use this routine for mapping page frame addresses
- * into a memory map index.
+ * Most platforms use this routine for mapping page frame addresses into a memory map
+ * index.
+ *
+ * Note: we can't use __pa() because map_nr_dense(X) MUST map to something >= max_mapnr if
+ * X is outside the identity mapped kernel space.
  */
 unsigned long
 map_nr_dense (unsigned long addr)
 {
-	return MAP_NR_DENSE(addr);
+	return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
 }
 
 static struct ia64_machine_vector *
...
arch/ia64/kernel/mca.c
...
@@ -82,27 +82,27 @@ extern void ia64_slave_init_handler (void);
 extern struct hw_interrupt_type irq_type_iosapic_level;
 
 static struct irqaction cmci_irqaction = {
-	handler:	ia64_mca_cmc_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cmc_hndlr"
+	.handler =	ia64_mca_cmc_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cmc_hndlr"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
-	handler:	ia64_mca_rendez_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_rdzv"
+	.handler =	ia64_mca_rendez_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
-	handler:	ia64_mca_wakeup_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_wkup"
+	.handler =	ia64_mca_wakeup_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_wkup"
 };
 
 static struct irqaction mca_cpe_irqaction = {
-	handler:	ia64_mca_cpe_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cpe_hndlr"
+	.handler =	ia64_mca_cpe_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_hndlr"
 };
 
 /*
...
arch/ia64/kernel/mca_asm.S
...
@@ -684,9 +684,9 @@ ia64_os_mca_tlb_error_check:
 	movl	r3=SAL_GET_STATE_INFO;;
 	DATA_VA_TO_PA(r7);;		// convert to physical address
 	ld8	r8=[r7],8;;		// get pdesc function pointer
-	DATA_VA_TO_PA(r8)		// convert to physical address
+	dep	r8=0,r8,61,3;;		// convert SAL VA to PA
 	ld8	r1=[r7];;		// set new (ia64_sal) gp
-	DATA_VA_TO_PA(r1)		// convert to physical address
+	dep	r1=0,r1,61,3;;		// convert SAL VA to PA
 	mov	b6=r8
 
 	alloc	r5=ar.pfs,8,0,8,0;;	// allocate stack frame for SAL call
...
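The `dep rX=0,rX,61,3` instruction deposits a 3-bit field of zeros at bit 61, i.e. it clears the top three (region) bits of an identity-mapped kernel virtual address to recover the physical address. A rough C model of the effect, not the kernel's macro (the example address is made up):

#include <stdio.h>
#include <stdint.h>

/* model of "dep r8=0,r8,61,3": clear bits 61..63 of the address */
static uint64_t
va_to_pa (uint64_t va)
{
	return va & ~(0x7UL << 61);
}

int
main (void)
{
	uint64_t va = 0xe000000000123456UL;	/* assumed identity-mapped kernel VA */
	printf("va=0x%016llx pa=0x%016llx\n",
	       (unsigned long long) va, (unsigned long long) va_to_pa(va));
	return 0;
}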
arch/ia64/kernel/pci.c
...
@@ -265,12 +265,37 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
 int
 pcibios_enable_device (struct pci_dev *dev)
 {
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
 	if (!dev)
 		return -EINVAL;
 
-	/* Not needed, since we enable all devices at startup.  */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx=0; idx<6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			printk(KERN_ERR
+			       "PCI: Device %s not available because of resource collisions\n",
+			       dev->slot_name);
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (dev->resource[PCI_ROM_RESOURCE].start)
+		cmd |= PCI_COMMAND_MEMORY;
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
 
 	printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
 
 	return 0;
 }
...
arch/ia64/kernel/perfmon.c
...
@@ -106,6 +112,12 @@
 #define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
 
+#ifdef CONFIG_SMP
+#define cpu_is_online(i)	(cpu_online_map & (1UL << i))
+#else
+#define cpu_is_online(i)	(i==0)
+#endif
+
 /*
  * debugging
  */
...
@@ -277,8 +283,8 @@ typedef struct {
 typedef struct {
 	pfm_pmu_reg_type_t	type;
 	int			pm_pos;
-	int		(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
-	int		(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
+	int		(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+	int		(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
 	unsigned long		dep_pmd[4];
 	unsigned long		dep_pmc[4];
 } pfm_reg_desc_t;
...
@@ -396,7 +402,7 @@ static unsigned long reset_pmcs[IA64_NUM_PMC_REGS];	/* contains PAL reset values
 static void pfm_vm_close(struct vm_area_struct * area);
 
 static struct vm_operations_struct pfm_vm_ops = {
-	close:	pfm_vm_close
+	.close =	pfm_vm_close
 };
 
 /*
...
@@ -902,8 +908,8 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 		/*
 		 * and it must be a valid CPU
 		 */
-		cpu = ffs(pfx->ctx_cpu_mask);
-		if (!cpu_online(cpu)) {
+		cpu = ffz(~pfx->ctx_cpu_mask);
+		if (cpu_is_online(cpu) == 0) {
 			DBprintk(("CPU%d is not online\n", cpu));
 			return -EINVAL;
 		}
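The switch from ffs() to ffz(~mask) fixes an indexing mismatch: ffs() returns a 1-based bit position (0 meaning no bit set), while ffz() returns the 0-based index of the first zero bit, so ffz(~mask) yields the 0-based index of the first set bit, which is what a CPU number should be. A user-space sketch of the off-by-one, using the GCC builtin as a stand-in for the kernel helper (the builtin is an assumption for illustration):

#include <stdio.h>

/* 0-based index of the first set bit, modeled on the kernel's ffz(~x) */
static int
first_set_bit (unsigned long x)
{
	int i;
	for (i = 0; i < 64; i++)
		if (x & (1UL << i))
			return i;
	return -1;	/* no bit set */
}

int
main (void)
{
	unsigned long mask = 1UL << 3;				/* CPU 3 only */
	printf("ffs-style (1-based):  %d\n", __builtin_ffsl(mask));	/* prints 4 */
	printf("ffz(~mask) (0-based): %d\n", first_set_bit(mask));	/* prints 3 */
	return 0;
}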
...
@@ -925,11 +931,12 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#if 0
 		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
 			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#endif
 	}
 	/* probably more to add here */
...
@@ -968,7 +975,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
 	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
 		/* at this point, we know there is at least one bit set */
-		cpu = ffs(tmp.ctx_cpu_mask) - 1;
+		cpu = ffz(~tmp.ctx_cpu_mask);
 
 		DBprintk(("requesting CPU%d currently on CPU%d\n", cpu, smp_processor_id()));
...
@@ -1280,7 +1287,7 @@ pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/*
 		 * execute write checker, if any
 		 */
-		if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value);
+		if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
 abort_mission:
 		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...
@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/*
 		 * execute write checker, if any
 		 */
-		if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value);
+		if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
 abort_mission:
 		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...
@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/* keep track of what we use */
 		CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);
+
+		/* mark this register as used as well */
+		CTX_USED_PMD(ctx, RDEP(cnum));
 
 		/* writes to unimplemented part is ignored, so this is safe */
 		ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val);
...
@@ -1438,7 +1447,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 	DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));
 
 	for (i = 0; i < count; i++, req++) {
-		unsigned long reg_val = ~0UL, ctx_val = ~0UL;
+		unsigned long ctx_val = ~0UL;
 
 		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
...
@@ -1462,7 +1471,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		 */
 		if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){
 			ia64_srlz_d();
-			val = reg_val = ia64_get_pmd(cnum);
+			val = ia64_get_pmd(cnum);
 			DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
 		} else {
 #ifdef CONFIG_SMP
...
@@ -1484,7 +1493,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 			}
 #endif
 			/* context has been saved */
-			val = reg_val = th->pmd[cnum];
+			val = th->pmd[cnum];
 		}
 		if (PMD_IS_COUNTING(cnum)) {
 			/*
...
@@ -1493,9 +1502,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 			val &= pmu_conf.perf_ovfl_val;
 			val += ctx_val = ctx->ctx_soft_pmds[cnum].val;
-		} else {
-			val = reg_val = ia64_get_pmd(cnum);
 		}
 
 		tmp.reg_value = val;
...
@@ -1503,14 +1510,13 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		 * execute read checker, if any
 		 */
 		if (PMD_RD_FUNC(cnum)) {
-			ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value);
+			ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
 		}
 
 		PFM_REG_RETFLAG_SET(tmp.reg_flags, ret);
 
-		DBprintk(("read pmd[%u] ret=%d soft_pmd=0x%lx reg=0x%lx pmc=0x%lx\n",
-			  cnum, ret, ctx_val, reg_val,
-			  ia64_get_pmc(cnum)));
+		DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
+			  cnum, ret, val, ia64_get_pmc(cnum)));
 
 		if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
 	}
...
@@ -1553,15 +1559,11 @@ pfm_use_debug_registers(struct task_struct *task)
 	 */
 	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
 
-	/*
-	 * XXX: not pretty
-	 */
 	LOCK_PFS();
 
 	/*
-	 * We only allow the use of debug registers when there is no system
-	 * wide monitoring
-	 * XXX: we could relax this by
+	 * We cannot allow setting breakpoints when system wide monitoring
+	 * sessions are using the debug registers.
 	 */
 	if (pfm_sessions.pfs_sys_use_dbregs > 0)
 		ret = -1;
...
@@ -1921,7 +1923,6 @@ typedef union {
 	dbr_mask_reg_t	dbr;
 } dbreg_t;
 
-
 static int
 pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs)
 {
...
@@ -1963,8 +1964,8 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 	if (ctx->ctx_fl_system) {
 		/* we mark ourselves as owner of the debug registers */
 		ctx->ctx_fl_using_dbreg = 1;
-	} else {
-		if (ctx->ctx_fl_using_dbreg == 0) {
+		DBprintk(("system-wide setting fl_using_dbreg for [%d]\n", task->pid));
+	} else if (first_time) {
 			ret = -EBUSY;
 			if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) {
 				DBprintk(("debug registers already in use for [%d]\n", task->pid));
...
@@ -1973,6 +1974,7 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 			/* we mark ourselves as owner of the debug registers */
 			ctx->ctx_fl_using_dbreg = 1;
+			DBprintk(("setting fl_using_dbreg for [%d]\n", task->pid));
 			/*
 			 * Given debug registers cannot be used for both debugging
 			 * and performance monitoring at the same time, we reuse
...
@@ -1980,20 +1982,27 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
 			 */
 			memset(task->thread.dbr, 0, sizeof(task->thread.dbr));
 			memset(task->thread.ibr, 0, sizeof(task->thread.ibr));
+	}
 
-		/*
-		 * clear hardware registers to make sure we don't
-		 * pick up stale state
-		 */
-		for (i=0; i < pmu_conf.num_ibrs; i++) {
-			ia64_set_ibr(i, 0UL);
-		}
-		ia64_srlz_i();
-		for (i=0; i < pmu_conf.num_dbrs; i++) {
-			ia64_set_dbr(i, 0UL);
-		}
-		ia64_srlz_d();
+	if (first_time) {
+		DBprintk(("[%d] clearing ibrs,dbrs\n", task->pid));
+		/*
+		 * clear hardware registers to make sure we don't
+		 * pick up stale state.
+		 *
+		 * for a system wide session, we do not use
+		 * thread.dbr, thread.ibr because this process
+		 * never leaves the current CPU and the state
+		 * is shared by all processes running on it
+		 */
+		for (i=0; i < pmu_conf.num_ibrs; i++) {
+			ia64_set_ibr(i, 0UL);
+		}
+		ia64_srlz_i();
+		for (i=0; i < pmu_conf.num_dbrs; i++) {
+			ia64_set_dbr(i, 0UL);
+		}
+		ia64_srlz_d();
 	}
 
 	ret = -EFAULT;
...
@@ -2361,9 +2370,9 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 {
 	struct pt_regs *regs = (struct pt_regs *)&stack;
 	struct task_struct *task = current;
-	pfm_context_t *ctx = task->thread.pfm_context;
+	pfm_context_t *ctx;
 	size_t sz;
-	int ret = -ESRCH, narg;
+	int ret, narg;
 
 	/*
 	 * reject any call if perfmon was disabled at initialization time
...
@@ -2393,6 +2402,8 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 	if (pid != current->pid) {
+		ret = -ESRCH;
+
 		read_lock(&tasklist_lock);
 
 		task = find_task_by_pid(pid);
...
@@ -2407,10 +2418,11 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
 			ret = check_task_state(task);
 			if (ret != 0) goto abort_call;
 		}
-		ctx = task->thread.pfm_context;
 	}
+
+	ctx = task->thread.pfm_context;
 
 	if (PFM_CMD_USE_CTX(cmd)) {
 		ret = -EINVAL;
 		if (ctx == NULL) {
...
@@ -2953,11 +2965,6 @@ perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
 static int
 perfmon_proc_info(char *page)
 {
-#ifdef CONFIG_SMP
-#define cpu_is_online(i) (cpu_online_map & (1UL << i))
-#else
-#define cpu_is_online(i) 1
-#endif
 	char *p = page;
 	int i;
...
@@ -4118,9 +4125,9 @@ pfm_cleanup_notifiers(struct task_struct *task)
 }
 
 static struct irqaction perfmon_irqaction = {
-	handler:	perfmon_interrupt,
-	flags:		SA_INTERRUPT,
-	name:		"perfmon"
+	.handler =	perfmon_interrupt,
+	.flags =	SA_INTERRUPT,
+	.name =		"perfmon"
 };
...
@@ -4150,11 +4157,6 @@ perfmon_init (void)
 	pal_perf_mon_info_u_t pm_info;
 	s64 status;
 
-	register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
-
-	ia64_set_pmv(IA64_PERFMON_VECTOR);
-	ia64_srlz_d();
-
 	pmu_conf.pfm_is_disabled = 1;
 
 	printk("perfmon: version %u.%u (sampling format v%u.%u) IRQ %u\n",
...
@@ -4232,6 +4234,9 @@ __initcall(perfmon_init);
 void
 perfmon_init_percpu (void)
 {
+	if (smp_processor_id() == 0) register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+
 	ia64_set_pmv(IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
 }
...
arch/ia64/kernel/perfmon_itanium.h
0 → 100644

/*
 * This file contains the Itanium PMU register description tables
 * and pmc checker used by perfmon.c.
 *
 * Copyright (C) 2002 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 */

#define RDEP(x)	(1UL<<(x))

#ifndef CONFIG_ITANIUM
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif

static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);

static pfm_reg_desc_t pmc_desc[256]={
/* pmc0  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8  */ { PFM_REG_CONFIG  , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9  */ { PFM_REG_CONFIG  , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 6, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG  , 0, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
	    { PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}},	/* end marker */
};

static pfm_reg_desc_t pmd_desc[256]={
/* pmd0  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
	    { PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}},	/* end marker */
};

static int
pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	int ret;

	/*
	 * we must clear the (instruction) debug registers if pmc13.ta bit is cleared
	 * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
	 */
	if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {

		/* don't mix debug with perfmon */
		if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;

		/*
		 * a count of 0 will mark the debug registers as in use and also
		 * ensure that they are properly cleared.
		 */
		ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
		if (ret) return ret;
	}

	/*
	 * we must clear the (data) debug registers if pmc11.pt bit is cleared
	 * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
	 */
	if (cnum == 11 && ((*val >> 28) & 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {

		/* don't mix debug with perfmon */
		if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;

		/*
		 * a count of 0 will mark the debug registers as in use and also
		 * ensure that they are properly cleared.
		 */
		ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
		if (ret) return ret;
	}
	return 0;
}
arch/ia64/kernel/perfmon_mckinley.h
0 → 100644

/*
 * This file contains the McKinley PMU register description tables
 * and pmc checker used by perfmon.c.
 *
 * Copyright (C) 2002 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 */

#define RDEP(x)	(1UL<<(x))

#ifndef CONFIG_MCKINLEY
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif

static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);

static pfm_reg_desc_t pmc_desc[256]={
/* pmc0  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3  */ { PFM_REG_CONTROL , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4  */ { PFM_REG_COUNTING, 6, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7  */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8  */ { PFM_REG_CONFIG  , 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9  */ { PFM_REG_CONFIG  , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 4, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, NULL, NULL, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG  , 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_CONFIG  , 0, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_CONFIG  , 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
	    { PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}},	/* end marker */
};

static pfm_reg_desc_t pmd_desc[256]={
/* pmd0  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7  */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9  */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER  , 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
	    { PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}},	/* end marker */
};

static int
pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
	struct thread_struct *th = &task->thread;
	pfm_context_t *ctx = task->thread.pfm_context;
	int ret = 0, check_case1 = 0;
	unsigned long val8 = 0, val14 = 0, val13 = 0;

	/*
	 * we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
	 * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
	 */
	if (cnum == 13 && (*val & (0xfUL << 45)) && ctx->ctx_fl_using_dbreg == 0) {

		/* don't mix debug with perfmon */
		if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;

		/*
		 * a count of 0 will mark the debug registers as in use and also
		 * ensure that they are properly cleared.
		 */
		ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
		if (ret) return ret;
	}

	/*
	 * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
	 * before they are (fl_using_dbreg==0) to avoid picking up stale information.
	 */
	if (cnum == 14 && ((*val & 0x2222) != 0x2222) && ctx->ctx_fl_using_dbreg == 0) {

		/* don't mix debug with perfmon */
		if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;

		/*
		 * a count of 0 will mark the debug registers as in use and also
		 * ensure that they are properly cleared.
		 */
		ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
		if (ret) return ret;
	}

	switch(cnum) {
		case  4: *val |= 1UL << 23;	/* force power enable bit */
			 break;
		case  8: val8  = *val;
			 val13 = th->pmc[13];
			 val14 = th->pmc[14];
			 check_case1 = 1;
			 break;
		case 13: val8  = th->pmc[8];
			 val13 = *val;
			 val14 = th->pmc[14];
			 check_case1 = 1;
			 break;
		case 14: val8  = th->pmc[13];
			 val13 = th->pmc[13];
			 val14 = *val;
			 check_case1 = 1;
			 break;
	}

	/* check illegal configuration which can produce inconsistencies in tagging
	 * i-side events in L1D and L2 caches
	 */
	if (check_case1) {
		ret =   ((val13 >> 45) & 0xf) == 0
		   && ((val8 & 0x1) == 0)
		   && ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
		       ||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));

		if (ret) printk("perfmon: failure check_case1\n");
	}

	return ret ? -EINVAL : 0;
}
arch/ia64/kernel/process.c
...
@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags,
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
 
+	/* stop some PSR bits from being inherited: */
+	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+				 & ~IA64_PSR_BITS_TO_CLEAR);
+
 	/*
 	 * NOTE: The calling convention considers all floating point
 	 * registers in the high partition (fph) to be scratch.  Since
...
arch/ia64/kernel/setup.c
...
@@ -455,10 +455,10 @@ c_stop (struct seq_file *m, void *v)
 }

 struct seq_operations cpuinfo_op = {
-	start:	c_start,
-	next:	c_next,
-	stop:	c_stop,
-	show:	show_cpuinfo
+	.start =	c_start,
+	.next =		c_next,
+	.stop =		c_stop,
+	.show =		show_cpuinfo
 };

 void
...
@@ -542,7 +542,18 @@ cpu_init (void)
 	extern char __per_cpu_end[];
 	int cpu = smp_processor_id();

-	my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
+	if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
+		panic("Per-cpu data area too big! (%Zu > %Zu)",
+		      __per_cpu_end - __per_cpu_start, PAGE_SIZE);
+
+	/*
+	 * On the BSP, the page allocator isn't initialized by the time we get here.  On
+	 * the APs, the bootmem allocator is no longer available...
+	 */
+	if (cpu == 0)
+		my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
+	else
+		my_cpu_data = (void *) get_free_page(GFP_KERNEL);
 	memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 	__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
 	my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
...
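The cpuinfo_op hunk above is the first of many in this commit that swap the old GNU `field: value` initializer extension for the C99 `.field = value` form that gcc 3.x prefers; the same mechanical substitution recurs in the irqaction, file_operations, and screen_info hunks below. A minimal standalone illustration (hypothetical struct, compiles with any C99 compiler):

	#include <stdio.h>

	struct ops {
		int (*open)(void);
		int (*close)(void);
	};

	static int my_open(void)  { return 0; }
	static int my_close(void) { return 0; }

	/* old GNU extension (deprecated): struct ops o = { open: my_open };
	 * C99 designated initializers below; unnamed fields are zero-filled. */
	static struct ops o = {
		.open  = my_open,
		.close = my_close,
	};

	int main(void)
	{
		printf("%d\n", o.open() + o.close());
		return 0;
	}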
arch/ia64/kernel/signal.c
...
@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
 	if (from->si_code < 0) {
 		if (__copy_to_user(to, from, sizeof(siginfo_t)))
 			return -EFAULT;
+		return 0;
 	} else {
 		int err;
...
arch/ia64/kernel/smpboot.c
...
@@ -425,7 +425,7 @@ do_boot_cpu (int sapicid)
 	task_for_booting_cpu = idle;

-	Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
+	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

 	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
...
@@ -537,7 +537,7 @@ smp_boot_cpus (void)
 	printk("Before bogomips.\n");
 	if (!cpucount) {
-		printk(KERN_ERR "Error: only one processor found.\n");
+		printk(KERN_WARNING "Warning: only one processor found.\n");
 	} else {
 		unsigned long bogosum = 0;
 		for (cpu = 0; cpu < NR_CPUS; cpu++)
...
arch/ia64/kernel/time.c
...
@@ -41,21 +41,22 @@ do_profile (unsigned long ip)
 	extern unsigned long prof_cpu_mask;
 	extern char _stext;

+	if (!prof_buffer)
+		return;
+
 	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
 		return;

-	if (prof_buffer && current->pid) {
-		ip -= (unsigned long) &_stext;
-		ip >>= prof_shift;
-		/*
-		 * Don't ignore out-of-bounds IP values silently, put them into the last
-		 * histogram slot, so if present, they will show up as a sharp peak.
-		 */
-		if (ip > prof_len - 1)
-			ip = prof_len - 1;
-		atomic_inc((atomic_t *) &prof_buffer[ip]);
-	}
+	ip -= (unsigned long) &_stext;
+	ip >>= prof_shift;
+	/*
+	 * Don't ignore out-of-bounds IP values silently, put them into the last
+	 * histogram slot, so if present, they will show up as a sharp peak.
+	 */
+	if (ip > prof_len - 1)
+		ip = prof_len - 1;
+
+	atomic_inc((atomic_t *) &prof_buffer[ip]);
 }

 /*
...
@@ -285,9 +286,9 @@ ia64_init_itm (void)
 }

 static struct irqaction timer_irqaction = {
-	handler:	timer_interrupt,
-	flags:		SA_INTERRUPT,
-	name:		"timer"
+	.handler =	timer_interrupt,
+	.flags =	SA_INTERRUPT,
+	.name =		"timer"
 };

 void __init
...
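The do_profile() change above only reorders the early exits; the bucketing arithmetic is untouched. As a hedged model of that arithmetic (illustrative names, not the kernel's):

	/* Model: map an instruction pointer to a histogram slot.
	 * slot = (ip - text_start) >> prof_shift, clamped to the last slot. */
	static unsigned long profile_slot(unsigned long ip, unsigned long text_start,
					  unsigned long prof_shift, unsigned long prof_len)
	{
		ip -= text_start;
		ip >>= prof_shift;	/* each slot covers 2^prof_shift bytes of text */
		if (ip > prof_len - 1)
			ip = prof_len - 1;	/* out-of-bounds IPs pile up in the last slot */
		return ip;
	}

With prof_shift = 4, for example, an IP 0x123 bytes past _stext lands in slot 0x12.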
arch/ia64/kernel/traps.c
...
@@ -93,9 +93,9 @@ die (const char *str, struct pt_regs *regs, long err)
 		int lock_owner;
 		int lock_owner_depth;
 	} die = {
-		lock:			SPIN_LOCK_UNLOCKED,
-		lock_owner:		-1,
-		lock_owner_depth:	0
+		.lock =			SPIN_LOCK_UNLOCKED,
+		.lock_owner =		-1,
+		.lock_owner_depth =	0
 	};

 	if (die.lock_owner != smp_processor_id()) {
...
@@ -435,7 +435,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 	unsigned long code, error = isr;
 	struct siginfo siginfo;
 	char buf[128];
-	int result;
+	int result, sig;
 	static const char *reason[] = {
 		"IA-64 Illegal Operation fault",
 		"IA-64 Privileged Operation fault",
...
@@ -479,6 +479,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 		break;

 	      case 26: /* NaT Consumption */
+		if (user_mode(regs)) {
+			if (((isr >> 4) & 0xf) == 2) {
+				/* NaT page consumption */
+				sig = SIGSEGV;
+				code = SEGV_ACCERR;
+			} else {
+				/* register NaT consumption */
+				sig = SIGILL;
+				code = ILL_ILLOPN;
+			}
+			siginfo.si_signo = sig;
+			siginfo.si_code = code;
+			siginfo.si_errno = 0;
+			siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+			siginfo.si_imm = vector;
+			siginfo.si_flags = __ISR_VALID;
+			siginfo.si_isr = isr;
+			force_sig_info(sig, &siginfo, current);
+			return;
+		} else if (done_with_exception(regs))
+			return;
+		sprintf(buf, "NaT consumption");
+		break;
+
 	      case 31: /* Unsupported Data Reference */
 		if (user_mode(regs)) {
 			siginfo.si_signo = SIGILL;
...
@@ -491,7 +515,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 			force_sig_info(SIGILL, &siginfo, current);
 			return;
 		}
-		sprintf(buf, (vector == 26) ? "NaT consumption" : "Unsupported data reference");
+		sprintf(buf, "Unsupported data reference");
 		break;

 	      case 29: /* Debug */
...
@@ -508,16 +532,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 			if (ia64_psr(regs)->is == 0)
 				ifa = regs->cr_iip;
 #endif
-			siginfo.si_addr = (void *) ifa;
 			break;
-		      case 35: siginfo.si_code = TRAP_BRANCH; break;
-		      case 36: siginfo.si_code = TRAP_TRACE; break;
+		      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
+		      case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
 		}
 		siginfo.si_signo = SIGTRAP;
 		siginfo.si_errno = 0;
 		siginfo.si_flags = 0;
 		siginfo.si_isr = 0;
-		siginfo.si_addr = 0;
+		siginfo.si_addr = (void *) ifa;
 		siginfo.si_imm = 0;
 		force_sig_info(SIGTRAP, &siginfo, current);
 		return;
...
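The new case-26 handler picks the signal from the ISR code field; a compact model of that decision, assuming (as the code does) that code 2 in bits 7:4 means a NaT page rather than a register NaT:

	#include <signal.h>

	/* Model of the dispatch above: NaT page consumption -> SIGSEGV,
	 * register NaT consumption -> SIGILL. */
	static int nat_consumption_signal(unsigned long isr)
	{
		return (((isr >> 4) & 0xf) == 2) ? SIGSEGV : SIGILL;
	}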
arch/ia64/kernel/unwind.c
...
@@ -140,13 +140,13 @@ static struct {
 	} stat;
 # endif
 } unw = {
-	tables: &unw.kernel_table,
-	lock: SPIN_LOCK_UNLOCKED,
-	save_order: {
+	.tables = &unw.kernel_table,
+	.lock = SPIN_LOCK_UNLOCKED,
+	.save_order = {
 		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
 		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
 	},
-	preg_index: {
+	.preg_index = {
 		struct_offset(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
 		struct_offset(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
 		struct_offset(struct unw_frame_info, bsp_loc)/8,
...
@@ -189,9 +189,9 @@ static struct {
 		struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
 		struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
 	},
-	hash : { [0 ... UNW_HASH_SIZE - 1] = -1 },
+	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
 #if UNW_DEBUG
-	preg_name: {
+	.preg_name = {
 		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
 		"r4", "r5", "r6", "r7",
 		"ar.unat", "pr", "ar.lc", "ar.fpsr",
...
@@ -634,8 +634,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize,
 	for (reg = hi; reg >= lo; --reg) {
 		if (reg->where == UNW_WHERE_SPILL_HOME) {
 			reg->where = UNW_WHERE_PSPREL;
-			reg->val = 0x10 - *offp;
-			*offp -= regsize;
+			*offp += regsize;
+			reg->val = *offp;
 		}
 	}
 }
...
@@ -814,7 +814,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *s
 	}
 	for (i = 0; i < 20; ++i) {
 		if ((frmask & 1) != 0) {
-			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
+			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
+			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
 				sr->region_start + sr->region_len - 1, 0);
 			sr->any_spills = 1;
 		}
...
arch/ia64/lib/Makefile
...
@@ -9,12 +9,12 @@ export-objs := io.o swiotlb.o

 obj-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o			\
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o			\
 	checksum.o clear_page.o csum_partial_copy.o copy_page.o	\
-	copy_user.o clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
+	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
 	flush.o io.o ip_fast_csum.o do_csum.o				\
-	memcpy.o memset.o strlen.o swiotlb.o
+	memset.o strlen.o swiotlb.o

-obj-$(CONFIG_ITANIUM)	+= copy_page.o
-obj-$(CONFIG_MCKINLEY)	+= copy_page_mck.o
+obj-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
+obj-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o

 IGNORE_FLAGS_OBJS =	__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
...
arch/ia64/lib/copy_user.S
...
@@ -237,15 +237,17 @@ GLOBAL_ENTRY(__copy_user)
 .copy_user_bit##rshift:						\
 1:								\
 	EX(.failure_out,(EPI) st8 [dst1]=tmp,8);		\
-(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift;	\
-	EX(3f,(p16) ld8 val1[0]=[src1],8);			\
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
+	EX(3f,(p16) ld8 val1[1]=[src1],8);			\
+(p16)	mov val1[0]=r0;						\
 	br.ctop.dptk 1b;					\
 	;;							\
 	br.cond.sptk.many .diff_align_do_tail;			\
 2:								\
 (EPI)	st8 [dst1]=tmp,8;					\
-(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift;	\
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
 3:								\
-(p16)	mov val1[1]=r0;						\
+(p16)	mov val1[0]=r0;						\
 	br.ctop.dptk 2b;					\
 	;;							\
...
arch/ia64/lib/io.c
...
@@ -87,6 +87,12 @@ ia64_outl (unsigned int val, unsigned long port)
 	__ia64_outl(val, port);
 }

+void
+ia64_mmiob (void)
+{
+	__ia64_mmiob();
+}
+
 /* define aliases: */
 asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
...
@@ -99,4 +105,7 @@ asm ("__ia64_outb = ia64_outb");
 asm ("__ia64_outw = ia64_outw");
 asm ("__ia64_outl = ia64_outl");

+asm (".global __ia64_mmiob");
+asm ("__ia64_mmiob = ia64_mmiob");
+
 #endif /* CONFIG_IA64_GENERIC */
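On CONFIG_IA64_GENERIC kernels these entry points dispatch through the machine vector, and the `asm` aliases above bind the `__ia64_*` default names to the generic implementations. The underlying pattern is just a gas symbol equate; a hedged, standalone sketch with hypothetical names:

	/* default implementation */
	void my_mmiob(void)
	{
		/* ...platform-neutral ordering code... */
	}

	/* make __my_mmiob an alias for my_mmiob, so other code can bind to the
	 * default even when "my_mmiob" is a machine-vector macro elsewhere */
	asm (".global __my_mmiob");
	asm ("__my_mmiob = my_mmiob");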
arch/ia64/lib/memcpy_mck.S
0 → 100644

/*
 * Itanium 2-optimized version of memcpy and copy_user function
 *
 * Inputs:
 * 	in0:	destination address
 *	in1:	source address
 *	in2:	number of bytes to copy
 * Output:
 * 	0 if success, or number of byte NOT copied if error occurred.
 *
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
 */
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/page.h>

#if __GNUC__ >= 3
# define EK(y...) EX(y)
#else
# define EK(y,x...) x
#endif

GLOBAL_ENTRY(bcopy)
	.regstk 3,0,0,0
	mov r8=in0
	mov in0=in1
	;;
	mov in1=r8
	;;
END(bcopy)

/* McKinley specific optimization */

#define retval		r8
#define saved_pfs	r31
#define saved_lc	r10
#define saved_pr	r11
#define saved_in0	r14
#define saved_in1	r15
#define saved_in2	r16

#define src0		r2
#define src1		r3
#define dst0		r17
#define dst1		r18
#define cnt		r9

/* r19-r30 are temp for each code section */
#define PREFETCH_DIST	8
#define src_pre_mem	r19
#define dst_pre_mem	r20
#define src_pre_l2	r21
#define dst_pre_l2	r22
#define t1		r23
#define t2		r24
#define t3		r25
#define t4		r26
#define t5		t1	// alias!
#define t6		t2	// alias!
#define t7		t3	// alias!
#define n8		r27
#define t9		t5	// alias!
#define t10		t4	// alias!
#define t11		t7	// alias!
#define t12		t6	// alias!
#define t14		t10	// alias!
#define t13		r28
#define t15		r29
#define tmp		r30

/* defines for long_copy block */
#define	A	0
#define B	(PREFETCH_DIST)
#define C	(B + PREFETCH_DIST)
#define D	(C + 1)
#define N	(D + 1)
#define Nrot	((N + 7) & ~7)

/* alias */
#define in0		r32
#define in1		r33
#define in2		r34

GLOBAL_ENTRY(memcpy)
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f0
	br.cond.sptk .common_code
	;;
GLOBAL_ENTRY(__copy_user)
	.prologue
// check dest alignment
	and	r28=0x7,in0
	and	r29=0x7,in1
	mov	f6=f1
	mov	saved_in0=in0	// save dest pointer
	mov	saved_in1=in1	// save src pointer
	mov	saved_in2=in2	// save len
	;;
.common_code:
	cmp.gt	p15,p0=8,in2	// check for small size
	cmp.ne	p13,p0=0,r28	// check dest alignment
	cmp.ne	p14,p0=0,r29	// check src alignment
	add	src0=0,in1
	sub	r30=8,r28	// for .align_dest
	mov	retval=r0	// initialize return value
	;;
	add	dst0=0,in0
	add	dst1=1,in0	// dest odd index
	cmp.le	p6,p0 = 1,r30	// for .align_dest
(p15)	br.cond.dpnt .memcpy_short
(p13)	br.cond.dpnt .align_dest
(p14)	br.cond.dpnt .unaligned_src
	;;

// both dest and src are aligned on 8-byte boundary
.aligned_src:
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
	.save pr, saved_pr
	mov	saved_pr=pr
	shr.u	cnt=in2,7	// this much cache line
	;;
	cmp.lt	p6,p0=2*PREFETCH_DIST,cnt
	cmp.lt	p7,p8=1,cnt
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.body
	add	cnt=-1,cnt
	add	src_pre_mem=0,in1	// prefetch src pointer
	add	dst_pre_mem=0,in0	// prefetch dest pointer
	;;
(p7)	mov	ar.lc=cnt	// prefetch count
(p8)	mov	ar.lc=r0
(p6)	br.cond.dpnt .long_copy
	;;

.prefetch:
	lfetch.fault	  [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few .prefetch
	;;

.medium_copy:
	and	tmp=31,in2	// copy length after iteration
	shr.u	r29=in2,5	// number of 32-byte iteration
	add	dst1=8,dst0	// 2nd dest pointer
	;;
	add	cnt=-1,r29	// ctop iteration adjustment
	cmp.eq	p10,p0=r29,r0	// do we really need to loop?
	add	src1=8,src0	// 2nd src pointer
	cmp.le	p6,p0 = 8,tmp
	;;
	cmp.le	p7,p0 = 16,tmp
	mov	ar.lc=cnt	// loop setup
	cmp.eq	p16,p17=r0,r0
	mov	ar.ec=2
(p10)	br.dpnt.few .aligned_src_tail
	;;
	.align 32
1:
EX(.ex_handler, (p16)	ld8	r34=[src0],16)
EK(.ex_handler, (p16)	ld8	r38=[src1],16)
EX(.ex_handler, (p17)	st8	[dst0]=r33,16)
EK(.ex_handler, (p17)	st8	[dst1]=r37,16)
	;;
EX(.ex_handler, (p16)	ld8	r32=[src0],16)
EK(.ex_handler, (p16)	ld8	r36=[src1],16)
EX(.ex_handler, (p16)	st8	[dst0]=r34,16)
EK(.ex_handler, (p16)	st8	[dst1]=r38,16)
	br.ctop.dptk.few 1b
	;;

.aligned_src_tail:
EX(.ex_handler, (p6)	ld8	t1=[src0])
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
EX(.ex_hndlr_s, (p7)	ld8	t2=[src1],8)
	cmp.le	p8,p0 = 24,tmp
	and	r21=-8,tmp
	;;
EX(.ex_hndlr_s, (p8)	ld8	t3=[src1])
EX(.ex_handler, (p6)	st8	[dst0]=t1)	// store byte 1
	and	in2=7,tmp	// remaining length
EX(.ex_hndlr_d, (p7)	st8	[dst1]=t2,8)	// store byte 2
	add	src0=src0,r21	// setting up src pointer
	add	dst0=dst0,r21	// setting up dest pointer
	;;
EX(.ex_handler, (p8)	st8	[dst1]=t3)	// store byte 3
	mov	pr=saved_pr,-1
	br.dptk.many .memcpy_short
	;;

/* code taken from copy_page_mck */
.long_copy:
	.rotr v[2*PREFETCH_DIST]
	.rotp p[N]

	mov src_pre_mem = src0
	mov pr.rot = 0x10000
	mov ar.ec = 1			// special unrolled loop

	mov dst_pre_mem = dst0

	add src_pre_l2 = 8*8, src0
	add dst_pre_l2 = 8*8, dst0
	;;
	add src0 = 8, src_pre_mem	// first t1 src
	mov ar.lc = 2*PREFETCH_DIST - 1
	shr.u cnt=in2,7			// number of lines
	add src1 = 3*8, src_pre_mem	// first t3 src
	add dst0 = 8, dst_pre_mem	// first t1 dst
	add dst1 = 3*8, dst_pre_mem	// first t3 dst
	;;
	and tmp=127,in2			// remaining bytes after this block
	add cnt = -(2*PREFETCH_DIST) - 1, cnt
	// same as .line_copy loop, but with all predicated-off instructions removed:
.prefetch_loop:
EX(.ex_hndlr_lcpy_1, (p[A])	ld8 v[A] = [src_pre_mem], 128)		// M0
EK(.ex_hndlr_lcpy_1, (p[B])	st8 [dst_pre_mem] = v[B], 128)		// M2
	br.ctop.sptk .prefetch_loop
	;;
	cmp.eq p16, p0 = r0, r0		// reset p16 to 1
	mov ar.lc = cnt
	mov ar.ec = N			// # of stages in pipeline
	;;
.line_copy:
EX(.ex_handler,      (p[D])	ld8 t2 = [src0], 3*8)			// M0
EK(.ex_handler,      (p[D])	ld8 t4 = [src1], 3*8)			// M1
EX(.ex_handler_lcpy, (p[B])	st8 [dst_pre_mem] = v[B], 128)		// M2 prefetch dst from memory
EK(.ex_handler_lcpy, (p[D])	st8 [dst_pre_l2] = n8, 128)		// M3 prefetch dst from L2
	;;
EX(.ex_handler_lcpy, (p[A])	ld8 v[A] = [src_pre_mem], 128)		// M0 prefetch src from memory
EK(.ex_handler_lcpy, (p[C])	ld8 n8 = [src_pre_l2], 128)		// M1 prefetch src from L2
EX(.ex_handler,      (p[D])	st8 [dst0] = t1, 8)			// M2
EK(.ex_handler,      (p[D])	st8 [dst1] = t3, 8)			// M3
	;;
EX(.ex_handler,      (p[D])	ld8  t5 = [src0], 8)
EK(.ex_handler,      (p[D])	ld8  t7 = [src1], 3*8)
EX(.ex_handler,      (p[D])	st8 [dst0] =  t2, 3*8)
EK(.ex_handler,      (p[D])	st8 [dst1] =  t4, 3*8)
	;;
EX(.ex_handler,      (p[D])	ld8  t6 = [src0], 3*8)
EK(.ex_handler,      (p[D])	ld8 t10 = [src1], 8)
EX(.ex_handler,      (p[D])	st8 [dst0] =  t5, 8)
EK(.ex_handler,      (p[D])	st8 [dst1] =  t7, 3*8)
	;;
EX(.ex_handler,      (p[D])	ld8  t9 = [src0], 3*8)
EK(.ex_handler,      (p[D])	ld8 t11 = [src1], 3*8)
EX(.ex_handler,      (p[D])	st8 [dst0] =  t6, 3*8)
EK(.ex_handler,      (p[D])	st8 [dst1] = t10, 8)
	;;
EX(.ex_handler,      (p[D])	ld8 t12 = [src0], 8)
EK(.ex_handler,      (p[D])	ld8 t14 = [src1], 8)
EX(.ex_handler,      (p[D])	st8 [dst0] =  t9, 3*8)
EK(.ex_handler,      (p[D])	st8 [dst1] = t11, 3*8)
	;;
EX(.ex_handler,      (p[D])	ld8 t13 = [src0], 4*8)
EK(.ex_handler,      (p[D])	ld8 t15 = [src1], 4*8)
EX(.ex_handler,      (p[D])	st8 [dst0] = t12, 8)
EK(.ex_handler,      (p[D])	st8 [dst1] = t14, 8)
	;;
EX(.ex_handler,      (p[C])	ld8  t1 = [src0], 8)
EK(.ex_handler,      (p[C])	ld8  t3 = [src1], 8)
EX(.ex_handler,      (p[D])	st8 [dst0] = t13, 4*8)
EK(.ex_handler,      (p[D])	st8 [dst1] = t15, 4*8)
	br.ctop.sptk .line_copy
	;;
	add dst0=-8,dst0
	add src0=-8,src0
	mov in2=tmp
	.restore sp
	br.sptk.many .medium_copy
	;;

#define BLOCK_SIZE	128*32
#define blocksize	r23
#define curlen		r24

// dest is on 8-byte boundary, src is not. We need to do
// ld8-ld8, shrp, then st8.  Max 8 byte copy per cycle.
.unaligned_src:
	.prologue
	.save ar.pfs, saved_pfs
	alloc	saved_pfs=ar.pfs,3,5,0,8
	.save ar.lc, saved_lc
	mov	saved_lc=ar.lc
	.save pr, saved_pr
	mov	saved_pr=pr
	.body
.4k_block:
	mov	saved_in0=dst0	// need to save all input arguments
	mov	saved_in2=in2
	mov	blocksize=BLOCK_SIZE
	;;
	cmp.lt	p6,p7=blocksize,in2
	mov	saved_in1=src0
	;;
(p6)	mov	in2=blocksize
	;;
	shr.u	r21=in2,7	// this much cache line
	shr.u	r22=in2,4	// number of 16-byte iteration
	and	curlen=15,in2	// copy length after iteration
	and	r30=7,src0	// source alignment
	;;
	cmp.lt	p7,p8=1,r21
	add	cnt=-1,r21
	;;
	add	src_pre_mem=0,src0	// prefetch src pointer
	add	dst_pre_mem=0,dst0	// prefetch dest pointer
	and	src0=-8,src0		// 1st src pointer
(p7)	mov	ar.lc = r21
(p8)	mov	ar.lc = r0
	;;
	.align 32
1:	lfetch.fault	  [src_pre_mem], 128
	lfetch.fault.excl [dst_pre_mem], 128
	br.cloop.dptk.few 1b
	;;

	shladd	dst1=r22,3,dst0	// 2nd dest pointer
	shladd	src1=r22,3,src0	// 2nd src pointer
	cmp.eq	p8,p9=r22,r0	// do we really need to loop?
	cmp.le	p6,p7=8,curlen;	// have at least 8 byte remaining?
	add	cnt=-1,r22	// ctop iteration adjustment
	;;
EX(.ex_handler, (p9)	ld8	r33=[src0],8)	// loop primer
EK(.ex_handler, (p9)	ld8	r37=[src1],8)
(p8)	br.dpnt.few .noloop
	;;

// The jump address is calculated based on src alignment. The COPYU
// macro below need to confine its size to power of two, so an entry
// can be caulated using shl instead of an expensive multiply. The
// size is then hard coded by the following #define to match the
// actual size.  This make it somewhat tedious when COPYU macro gets
// changed and this need to be adjusted to match.
#define LOOP_SIZE 6
1:
	mov	r29=ip		// jmp_table thread
	mov	ar.lc=cnt
	;;
	add	r29=.jump_table - 1b - (.jmp1-.jump_table), r29
	shl	r28=r30, LOOP_SIZE	// jmp_table thread
	mov	ar.ec=2			// loop setup
	;;
	add	r29=r29,r28		// jmp_table thread
	cmp.eq	p16,p17=r0,r0
	;;
	mov	b6=r29			// jmp_table thread
	;;
	br.cond.sptk.few b6

// for 8-15 byte case
// We will skip the loop, but need to replicate the side effect
// that the loop produces.
.noloop:
EX(.ex_handler, (p6)	ld8	r37=[src1],8)
	add	src0=8,src0
(p6)	shl	r25=r30,3
	;;
EX(.ex_handler, (p6)	ld8	r27=[src1])
(p6)	shr.u	r28=r37,r25
(p6)	sub	r26=64,r25
	;;
(p6)	shl	r27=r27,r26
	;;
(p6)	or	r21=r28,r27

.unaligned_src_tail:
/* check if we have more than blocksize to copy, if so go back */
	cmp.gt	p8,p0=saved_in2,blocksize
	;;
(p8)	add	dst0=saved_in0,blocksize
(p8)	add	src0=saved_in1,blocksize
(p8)	sub	in2=saved_in2,blocksize
(p8)	br.dpnt .4k_block
	;;

/* we have up to 15 byte to copy in the tail.
 * part of work is already done in the jump table code
 * we are at the following state.
 * src side:
 *
 *   xxxxxx xx			<----- r21 has xxxxxxxx already
 * -------- -------- --------
 * 0        8        16
 *          ^
 *          |
 *          src1
 *
 * dst
 * -------- -------- --------
 * ^
 * |
 * dst1
 */
EX(.ex_handler, (p6)	st8	[dst1]=r21,8)	// more than 8 byte to copy
(p6)	add	curlen=-8,curlen	// update length
	mov	ar.pfs=saved_pfs
	;;
	mov	ar.lc=saved_lc
	mov	pr=saved_pr,-1
	mov	in2=curlen	// remaining length
	mov	dst0=dst1	// dest pointer
	add	src0=src1,r30	// forward by src alignment
	;;

// 7 byte or smaller.
.memcpy_short:
	cmp.le	p8,p9   = 1,in2
	cmp.le	p10,p11 = 2,in2
	cmp.le	p12,p13 = 3,in2
	cmp.le	p14,p15 = 4,in2
	add	src1=1,src0	// second src pointer
	add	dst1=1,dst0	// second dest pointer
	;;

EX(.ex_handler_short, (p8)	ld1	t1=[src0],2)
EK(.ex_handler_short, (p10)	ld1	t2=[src1],2)
(p9)	br.ret.dpnt rp		// 0 byte copy
	;;

EX(.ex_handler_short, (p8)	st1	[dst0]=t1,2)
EK(.ex_handler_short, (p10)	st1	[dst1]=t2,2)
(p11)	br.ret.dpnt rp		// 1 byte copy

EX(.ex_handler_short, (p12)	ld1	t3=[src0],2)
EK(.ex_handler_short, (p14)	ld1	t4=[src1],2)
(p13)	br.ret.dpnt rp		// 2 byte copy
	;;

	cmp.le	p6,p7   = 5,in2
	cmp.le	p8,p9   = 6,in2
	cmp.le	p10,p11 = 7,in2

EX(.ex_handler_short, (p12)	st1	[dst0]=t3,2)
EK(.ex_handler_short, (p14)	st1	[dst1]=t4,2)
(p15)	br.ret.dpnt rp		// 3 byte copy
	;;

EX(.ex_handler_short, (p6)	ld1	t5=[src0],2)
EK(.ex_handler_short, (p8)	ld1	t6=[src1],2)
(p7)	br.ret.dpnt rp		// 4 byte copy
	;;

EX(.ex_handler_short, (p6)	st1	[dst0]=t5,2)
EK(.ex_handler_short, (p8)	st1	[dst1]=t6,2)
(p9)	br.ret.dptk rp		// 5 byte copy

EX(.ex_handler_short, (p10)	ld1	t7=[src0],2)
(p11)	br.ret.dptk rp		// 6 byte copy
	;;

EX(.ex_handler_short, (p10)	st1	[dst0]=t7,2)
	br.ret.dptk rp		// done all cases

/* Align dest to nearest 8-byte boundary. We know we have at
 * least 7 bytes to copy, enough to crawl to 8-byte boundary.
 * Actual number of byte to crawl depend on the dest alignment.
 * 7 byte or less is taken care at .memcpy_short

 * src0 - source even index
 * src1 - source  odd index
 * dst0 - dest even index
 * dst1 - dest  odd index
 * r30  - distance to 8-byte boundary
 */

.align_dest:
	add	src1=1,in1	// source odd index
	cmp.le	p7,p0 = 2,r30	// for .align_dest
	cmp.le	p8,p0 = 3,r30	// for .align_dest
EX(.ex_handler_short, (p6)	ld1	t1=[src0],2)
	cmp.le	p9,p0 = 4,r30	// for .align_dest
	cmp.le	p10,p0 = 5,r30
	;;
EX(.ex_handler_short, (p7)	ld1	t2=[src1],2)
EK(.ex_handler_short, (p8)	ld1	t3=[src0],2)
	cmp.le	p11,p0 = 6,r30
EX(.ex_handler_short, (p6)	st1	[dst0] = t1,2)
	cmp.le	p12,p0 = 7,r30
	;;
EX(.ex_handler_short, (p9)	ld1	t4=[src1],2)
EK(.ex_handler_short, (p10)	ld1	t5=[src0],2)
EX(.ex_handler_short, (p7)	st1	[dst1] = t2,2)
EK(.ex_handler_short, (p8)	st1	[dst0] = t3,2)
	;;
EX(.ex_handler_short, (p11)	ld1	t6=[src1],2)
EK(.ex_handler_short, (p12)	ld1	t7=[src0],2)
	cmp.eq	p6,p7=r28,r29
EX(.ex_handler_short, (p9)	st1	[dst1] = t4,2)
EK(.ex_handler_short, (p10)	st1	[dst0] = t5,2)
	sub	in2=in2,r30
	;;
EX(.ex_handler_short, (p11)	st1	[dst1] = t6,2)
EK(.ex_handler_short, (p12)	st1	[dst0] = t7)
	add	dst0=in0,r30	// setup arguments
	add	src0=in1,r30
(p6)	br.cond.dptk .aligned_src
(p7)	br.cond.dpnt .unaligned_src
	;;

/* main loop body in jump table format */
#define COPYU(shift)									\
1:											\
EX(.ex_handler,  (p16)	ld8	r32=[src0],8);		/* 1 */				\
EK(.ex_handler,  (p16)	ld8	r36=[src1],8);						\
		 (p17)	shrp	r35=r33,r34,shift;;	/* 1 */				\
EX(.ex_handler,  (p6)	ld8	r22=[src1]);	/* common, prime for tail section */	\
		 nop.m	0;								\
		 (p16)	shrp	r38=r36,r37,shift;					\
EX(.ex_handler,  (p17)	st8	[dst0]=r35,8);		/* 1 */				\
EK(.ex_handler,  (p17)	st8	[dst1]=r39,8);						\
		 br.ctop.dptk.few 1b;;							\
		 (p7)	add	src1=-8,src1;	/* back out for <8 byte case */		\
		 shrp	r21=r22,r38,shift;	/* speculative work */			\
		 br.sptk.few .unaligned_src_tail /* branch out of jump table */		\
		 ;;
	.align 32
.jump_table:
	COPYU(8)	// unaligned cases
.jmp1:
	COPYU(16)
	COPYU(24)
	COPYU(32)
	COPYU(40)
	COPYU(48)
	COPYU(56)

#undef A
#undef B
#undef C
#undef D
END(memcpy)

/*
 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
 * instruction failed in the bundle.  The exception algorithm is that we
 * first figure out the faulting address, then detect if there is any
 * progress made on the copy, if so, redo the copy from last known copied
 * location up to the faulting address (exclusive).  In the copy_from_user
 * case, remaining byte in kernel buffer will be zeroed.
 *
 * Take copy_from_user as an example, in the code there are multiple loads
 * in a bundle and those multiple loads could span over two pages, the
 * faulting address is calculated as page_round_down(max(src0, src1)).
 * This is based on knowledge that if we can access one byte in a page, we
 * can access any byte in that page.
 *
 * predicate used in the exception handler:
 * p6-p7: direction
 * p10-p11: src faulting addr calculation
 * p12-p13: dst faulting addr calculation
 */

#define A	r19
#define B	r20
#define C	r21
#define D	r22
#define F	r28

#define memset_arg0	r32
#define memset_arg2	r33

#define saved_retval		loc0
#define saved_rtlink		loc1
#define saved_pfs_stack		loc2

.ex_hndlr_s:
	add	src0=8,src0
	br.sptk .ex_handler
	;;
.ex_hndlr_d:
	add	dst0=8,dst0
	br.sptk .ex_handler
	;;
.ex_hndlr_lcpy_1:
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
	cmp.gtu	p10,p11=src_pre_mem,saved_in1
	cmp.gtu	p12,p13=dst_pre_mem,saved_in0
	;;
(p10)	add	src0=8,saved_in1
(p11)	mov	src0=saved_in1
(p12)	add	dst0=8,saved_in0
(p13)	mov	dst0=saved_in0
	br.sptk	.ex_handler
.ex_handler_lcpy:
	// in line_copy block, the preload addresses should always ahead
	// of the other two src/dst pointers.  Furthermore, src1/dst1 should
	// always ahead of src0/dst0.
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
.ex_handler:
	mov	pr=saved_pr,-1		// first restore pr, lc, and pfs
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
	;;
.ex_handler_short: // fault occurred in these sections didn't change pr, lc, pfs
	cmp.ltu	p6,p7=saved_in0, saved_in1	// get the copy direction
	cmp.ltu	p10,p11=src0,src1
	cmp.ltu	p12,p13=dst0,dst1
	fcmp.eq	p8,p0=f6,f0		// is it memcpy?
	mov	tmp = dst0
	;;
(p11)	mov	src1 = src0		// pick the larger of the two
(p13)	mov	dst0 = dst1		// make dst0 the smaller one
(p13)	mov	dst1 = tmp		// and dst1 the larger one
	;;
(p6)	dep	F = r0,dst1,0,PAGE_SHIFT	// usr dst round down to page boundary
(p7)	dep	F = r0,src1,0,PAGE_SHIFT	// usr src round down to page boundary
	;;
(p6)	cmp.le	p14,p0=dst0,saved_in0	// no progress has been made on store
(p7)	cmp.le	p14,p0=src0,saved_in1	// no progress has been made on load
	mov	retval=saved_in2
(p8)	ld1	tmp=[src1]		// force an oops for memcpy call
(p8)	st1	[dst1]=r0		// force an oops for memcpy call
(p14)	br.ret.sptk.many rp

/*
 * The remaining byte to copy is calculated as:
 *
 * A =	(faulting_addr - orig_src)	-> len to faulting ld address
 *	or
 *	(faulting_addr - orig_dst)	-> len to faulting st address
 * B =	(cur_dst - orig_dst)		-> len copied so far
 * C =	A - B				-> len need to be copied
 * D =	orig_len - A			-> len need to be zeroed
 */
(p6)	sub	A = F, saved_in0
(p7)	sub	A = F, saved_in1
	clrrrb
	;;
	alloc	saved_pfs_stack=ar.pfs,3,3,3,0
	sub	B = dst0, saved_in0	// how many byte copied so far
	;;
	sub	C = A, B
	sub	D = saved_in2, A
	;;
	cmp.gt	p8,p0=C,r0		// more than 1 byte?
	add	memset_arg0=saved_in0, A
(p6)	mov	memset_arg2=0		// copy_to_user should not call memset
(p7)	mov	memset_arg2=D		// copy_from_user need to have kbuf zeroed
	mov	r8=0
	mov	saved_retval = D
	mov	saved_rtlink = b0

	add	out0=saved_in0, B
	add	out1=saved_in1, B
	mov	out2=C
(p8)	br.call.sptk.few b0=__copy_user	// recursive call
	;;

	add	saved_retval=saved_retval,r8	// above might return non-zero value
	cmp.gt	p8,p0=memset_arg2,r0	// more than 1 byte?
	mov	out0=memset_arg0	// *s
	mov	out1=r0			// c
	mov	out2=memset_arg2	// n
(p8)	br.call.sptk.few b0=memset
	;;

	mov	retval=saved_retval
	mov	ar.pfs=saved_pfs_stack
	mov	b0=saved_rtlink
	br.ret.sptk.many rp

/* end of McKinley specific optimization */
END(__copy_user)
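The A/B/C/D bookkeeping in the exception-handler comment above is easiest to check with concrete numbers. A hedged C model of the recovery math for the copy_from_user direction (user-space illustration with invented names, not the assembly itself):

	/* Model of the fixup in .ex_handler: given the original copy arguments
	 * and the fault address rounded down to a page, compute how much still
	 * needs copying and how much of the kernel buffer is left to zero. */
	struct fixup {
		unsigned long redo_len;	/* C: bytes to re-copy up to the fault page */
		unsigned long zero_len;	/* D: bytes of kernel buffer left to zero   */
	};

	static struct fixup copy_from_user_fixup(unsigned long orig_src,
						 unsigned long orig_dst,
						 unsigned long orig_len,
						 unsigned long cur_dst,
						 unsigned long fault_page)
	{
		unsigned long A = fault_page - orig_src;	/* len up to faulting load  */
		unsigned long B = cur_dst - orig_dst;		/* len already copied       */
		struct fixup f = { .redo_len = A - B, .zero_len = orig_len - A };
		return f;
	}

For example, with orig_len = 100, a fault page boundary at A = 64, and B = 40 bytes already stored, the recursive __copy_user call re-copies C = 24 bytes and memset() zeroes the remaining D = 36.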
arch/ia64/lib/swiotlb.c
...
@@ -415,18 +415,20 @@ int
 swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
 {
 	void *addr;
+	unsigned long pci_addr;
 	int i;

 	if (direction == PCI_DMA_NONE)
 		BUG();

 	for (i = 0; i < nelems; i++, sg++) {
-		sg->orig_address = SG_ENT_VIRT_ADDRESS(sg);
-		if ((SG_ENT_PHYS_ADDRESS(sg) & ~hwdev->dma_mask) != 0) {
-			addr = map_single(hwdev, sg->orig_address, sg->length, direction);
-			sg->page = virt_to_page(addr);
-			sg->offset = (u64) addr & ~PAGE_MASK;
-		}
+		addr = SG_ENT_VIRT_ADDRESS(sg);
+		pci_addr = virt_to_phys(addr);
+		if ((pci_addr & ~hwdev->dma_mask) != 0)
+			sg->dma_address = map_single(hwdev, addr, sg->length, direction);
+		else
+			sg->dma_address = pci_addr;
+		sg->dma_length = sg->length;
 	}
 	return nelems;
 }
...
@@ -444,12 +446,10 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
 		BUG();

 	for (i = 0; i < nelems; i++, sg++)
-		if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg)) {
-			unmap_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction);
-			sg->page = virt_to_page(sg->orig_address);
-			sg->offset = (u64) sg->orig_address & ~PAGE_MASK;
-		} else if (direction == PCI_DMA_FROMDEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->length);
+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+			unmap_single(hwdev, sg->dma_address, sg->dma_length, direction);
+		else if (direction == PCI_DMA_FROMDEVICE)
+			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }

 /*
...
@@ -468,14 +468,14 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
 		BUG();

 	for (i = 0; i < nelems; i++, sg++)
-		if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg))
-			sync_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction);
+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+			sync_single(hwdev, sg->dma_address, sg->dma_length, direction);
 }

 unsigned long
 swiotlb_dma_address (struct scatterlist *sg)
 {
-	return SG_ENT_PHYS_ADDRESS(sg);
+	return sg->dma_address;
 }

 /*
...
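After this change a mapped scatterlist entry carries its bus address and length in dma_address/dma_length, which sg_dma_address()/sg_dma_len() expose to drivers (the sg_dma_len macro is retargeted in the pci.h hunk further down). A minimal driver-side sketch of the era's pci_map_sg API, with error handling elided and the descriptor-ring step left as a placeholder:

	#include <linux/pci.h>

	/* Sketch: map a scatterlist and hand each DMA segment to the device. */
	static void dma_program(struct pci_dev *pdev, struct scatterlist *sgl, int nents)
	{
		int i, n;

		n = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
		for (i = 0; i < n; i++) {
			dma_addr_t bus   = sg_dma_address(&sgl[i]);
			unsigned int len = sg_dma_len(&sgl[i]);
			/* program bus/len into the device's descriptor ring here */
			(void) bus; (void) len;
		}
		pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
	}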
arch/ia64/mm/init.c
...
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
+#include <linux/personality.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
...
@@ -68,10 +69,9 @@ ia64_init_addr_space (void)
 	struct vm_area_struct *vma;

 	/*
-	 * If we're out of memory and kmem_cache_alloc() returns NULL,
-	 * we simply ignore the problem. When the process attempts to
-	 * write to the register backing store for the first time, it
-	 * will get a SEGFAULT in this case.
+	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
+	 * the problem.  When the process attempts to write to the register backing store
+	 * for the first time, it will get a SEGFAULT in this case.
 	 */
 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (vma) {
...
@@ -86,6 +86,19 @@ ia64_init_addr_space (void)
 		vma->vm_private_data = NULL;
 		insert_vm_struct(current->mm, vma);
 	}
+
+	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
+	if (!(current->personality & MMAP_PAGE_ZERO)) {
+		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		if (vma) {
+			memset(vma, 0, sizeof(*vma));
+			vma->vm_mm = current->mm;
+			vma->vm_end = PAGE_SIZE;
+			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
+			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+			insert_vm_struct(current->mm, vma);
+		}
+	}
 }

 void
...
arch/ia64/mm/tlb.c
...
@@ -35,10 +35,10 @@
 		1 << _PAGE_SIZE_4K )

 struct ia64_ctx ia64_ctx = {
-	lock:		SPIN_LOCK_UNLOCKED,
-	next:		1,
-	limit:		(1 << 15) - 1,	/* start out with the safe (architected) limit */
-	max_ctx:	~0U
+	.lock =		SPIN_LOCK_UNLOCKED,
+	.next =		1,
+	.limit =	(1 << 15) - 1,	/* start out with the safe (architected) limit */
+	.max_ctx =	~0U
 };

 /*
...
@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm)
 {
 	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
 	struct task_struct *tsk;
+	int i;

 	if (ia64_ctx.next > max_ctx)
 		ia64_ctx.next = 300;	/* skip daemons */
...
@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm)
 		ia64_ctx.limit = tsk_context;
 	}
 	read_unlock(&tasklist_lock);
-	flush_tlb_all();
+	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
+	for (i = 0; i < smp_num_cpus; ++i)
+		if (i != smp_processor_id())
+			per_cpu(ia64_need_tlb_flush, i) = 1;
+	__flush_tlb_all();
 }

 void
...
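Together with the delayed_tlb_flush() added to mmu_context.h later in this commit, the wrap path now defers remote flushes: the wrapping CPU flags every other CPU and flushes only itself, and each flagged CPU flushes lazily when it next activates an mm. A schematic of that handshake as a plain C model (toy names, not the kernel's per-CPU plumbing):

	#define NCPUS 4

	static volatile int need_tlb_flush[NCPUS];

	static void local_tlb_flush(void) { /* stand-in for __flush_tlb_all() */ }

	/* wrap_mmu_context() side: mark everyone else, flush ourselves now */
	static void on_context_wrap(int self)
	{
		int i;

		for (i = 0; i < NCPUS; i++)
			if (i != self)
				need_tlb_flush[i] = 1;	/* flushed lazily below */
		local_tlb_flush();
	}

	/* activate_mm() side: honor a pending flush before using new contexts */
	static void on_activate_mm(int self)
	{
		if (need_tlb_flush[self]) {
			local_tlb_flush();
			need_tlb_flush[self] = 0;
		}
	}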
arch/ia64/sn/io/ifconfig_net.c
...
@@ -279,9 +279,9 @@ static int ifconfig_net_ioctl(struct inode * inode, struct file * file,
 }

 struct file_operations ifconfig_net_fops = {
-	ioctl:ifconfig_net_ioctl,	/* ioctl */
-	open:ifconfig_net_open,		/* open */
-	release:ifconfig_net_close	/* release */
+	.ioctl = ifconfig_net_ioctl,	/* ioctl */
+	.open = ifconfig_net_open,	/* open */
+	.release = ifconfig_net_close	/* release */
 };
...
arch/ia64/sn/io/pciba.c
...
@@ -210,31 +210,31 @@ static void dump_allocations(struct list_head * dalp);

 /* file operations for each type of node */
 static struct file_operations rom_fops = {
-	owner:		THIS_MODULE,
-	mmap:		rom_mmap,
-	open:		generic_open,
-	release:	rom_release
+	.owner =	THIS_MODULE,
+	.mmap =		rom_mmap,
+	.open =		generic_open,
+	.release =	rom_release
 };

 static struct file_operations base_fops = {
-	owner:		THIS_MODULE,
-	mmap:		base_mmap,
-	open:		generic_open
+	.owner =	THIS_MODULE,
+	.mmap =		base_mmap,
+	.open =		generic_open
 };

 static struct file_operations config_fops = {
-	owner:		THIS_MODULE,
-	ioctl:		config_ioctl,
-	open:		generic_open
+	.owner =	THIS_MODULE,
+	.ioctl =	config_ioctl,
+	.open =		generic_open
 };

 static struct file_operations dma_fops = {
-	owner:		THIS_MODULE,
-	ioctl:		dma_ioctl,
-	mmap:		dma_mmap,
-	open:		generic_open
+	.owner =	THIS_MODULE,
+	.ioctl =	dma_ioctl,
+	.mmap =		dma_mmap,
+	.open =		generic_open
 };
...
arch/ia64/sn/io/sn1/hubcounters.c
...
@@ -24,7 +24,7 @@ extern void hubni_error_handler(char *, int); /* huberror.c */
 static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
 struct file_operations hub_mon_fops = {
-	ioctl:		hubstats_ioctl,
+	.ioctl =	hubstats_ioctl,
 };

 #define HUB_CAPTURE_TICKS (2 * HZ)
...
arch/ia64/sn/io/sn1/pcibr.c
...
@@ -307,22 +307,22 @@ extern void free_pciio_dmamap(pcibr_dmamap_t);
  * appropriate function name below.
  */
 struct file_operations pcibr_fops = {
-	owner:		THIS_MODULE,
-	llseek:		NULL,
-	read:		NULL,
-	write:		NULL,
-	readdir:	NULL,
-	poll:		NULL,
-	ioctl:		NULL,
-	mmap:		NULL,
-	open:		NULL,
-	flush:		NULL,
-	release:	NULL,
-	fsync:		NULL,
-	fasync:		NULL,
-	lock:		NULL,
-	readv:		NULL,
-	writev:		NULL
+	.owner =	THIS_MODULE,
+	.llseek =	NULL,
+	.read =		NULL,
+	.write =	NULL,
+	.readdir =	NULL,
+	.poll =		NULL,
+	.ioctl =	NULL,
+	.mmap =		NULL,
+	.open =		NULL,
+	.flush =	NULL,
+	.release =	NULL,
+	.fsync =	NULL,
+	.fasync =	NULL,
+	.lock =		NULL,
+	.readv =	NULL,
+	.writev =	NULL
 };

 extern devfs_handle_t hwgraph_root;
...
arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
...
@@ -64,22 +64,22 @@ int pcibr_devflag = D_MP;
  * appropriate function name below.
  */
 struct file_operations pcibr_fops = {
-	owner:		THIS_MODULE,
-	llseek:		NULL,
-	read:		NULL,
-	write:		NULL,
-	readdir:	NULL,
-	poll:		NULL,
-	ioctl:		NULL,
-	mmap:		NULL,
-	open:		NULL,
-	flush:		NULL,
-	release:	NULL,
-	fsync:		NULL,
-	fasync:		NULL,
-	lock:		NULL,
-	readv:		NULL,
-	writev:		NULL
+	.owner =	THIS_MODULE,
+	.llseek =	NULL,
+	.read =		NULL,
+	.write =	NULL,
+	.readdir =	NULL,
+	.poll =		NULL,
+	.ioctl =	NULL,
+	.mmap =		NULL,
+	.open =		NULL,
+	.flush =	NULL,
+	.release =	NULL,
+	.fsync =	NULL,
+	.fasync =	NULL,
+	.lock =		NULL,
+	.readv =	NULL,
+	.writev =	NULL
 };

 #ifdef LATER
...
arch/ia64/sn/kernel/setup.c
...
@@ -109,14 +109,14 @@ irqpda_t *irqpdaindr[NR_CPUS];
  * VGA color display.
  */
 struct screen_info sn1_screen_info = {
-	orig_x:			0,
-	orig_y:			0,
-	orig_video_mode:	3,
-	orig_video_cols:	80,
-	orig_video_ega_bx:	3,
-	orig_video_lines:	25,
-	orig_video_isVGA:	1,
-	orig_video_points:	16
+	.orig_x =		0,
+	.orig_y =		0,
+	.orig_video_mode =	3,
+	.orig_video_cols =	80,
+	.orig_video_ega_bx =	3,
+	.orig_video_lines =	25,
+	.orig_video_isVGA =	1,
+	.orig_video_points =	16
 };

 /*
...
@@ -170,9 +170,9 @@ early_sn1_setup(void)
 #ifdef NOT_YET_CONFIG_IA64_MCA
 extern void ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs);
 static struct irqaction mca_cpe_irqaction = {
-	handler:	ia64_mca_cpe_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cpe_hndlr"
+	.handler =	ia64_mca_cpe_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_hndlr"
 };
 #endif
 #ifdef CONFIG_IA64_MCA
...
include/asm-ia64/bitops.h
...
@@ -326,7 +326,7 @@ ia64_fls (unsigned long x)
 	return exp - 0xffff;
 }

-static int
+static inline int
 fls (int x)
 {
 	return ia64_fls((unsigned int) x);
...
include/asm-ia64/delay.h
...
@@ -53,7 +53,7 @@ ia64_get_itc (void)
 	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #ifdef CONFIG_ITANIUM
-	while (unlikely((__s32) result == -1)
+	while (unlikely((__s32) result == -1))
 		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
 #endif
 	return result;
...
include/asm-ia64/keyboard.h
...
@@ -16,6 +16,7 @@
 #define KEYBOARD_IRQ			isa_irq_to_vector(1)
 #define DISABLE_KBD_DURING_INTERRUPTS	0

+extern unsigned char acpi_kbd_controller_present;
 extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
 extern int pckbd_getkeycode(unsigned int scancode);
 extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
...
@@ -26,6 +27,7 @@ extern void pckbd_leds(unsigned char leds);
 extern void pckbd_init_hw(void);
 extern unsigned char pckbd_sysrq_xlate[128];

+#define kbd_controller_present()	acpi_kbd_controller_present
 #define kbd_setkeycode		pckbd_setkeycode
 #define kbd_getkeycode		pckbd_getkeycode
 #define kbd_pretranslate	pckbd_pretranslate
...
include/asm-ia64/kregs.h
...
@@ -64,6 +64,15 @@
 #define IA64_PSR_RI_BIT		41
 #define IA64_PSR_ED_BIT		43
 #define IA64_PSR_BN_BIT		44
+#define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+   execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
+   execve().  */
+#define IA64_PSR_BITS_TO_CLEAR	(IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+				 IA64_PSR_TB  | IA64_PSR_ID  | IA64_PSR_DA | IA64_PSR_DD | \
+				 IA64_PSR_SS  | IA64_PSR_ED  | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET	(IA64_PSR_DFH)

 #define IA64_PSR_BE	(__IA64_UL(1) << IA64_PSR_BE_BIT)
 #define IA64_PSR_UP	(__IA64_UL(1) << IA64_PSR_UP_BIT)
...
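copy_thread() (earlier in this commit) applies these masks as `(ipsr | IA64_PSR_BITS_TO_SET) & ~IA64_PSR_BITS_TO_CLEAR`. With toy 4-bit values the effect is easy to see; an illustrative check, not kernel code:

	#include <assert.h>

	int main(void)
	{
		unsigned long ipsr     = 0xb;	/* 1011: inherited child PSR */
		unsigned long to_set   = 0x4;	/* 0100: e.g. a DFH-style bit forced on */
		unsigned long to_clear = 0x3;	/* 0011: e.g. debug/step bits forced off */

		unsigned long child = (ipsr | to_set) & ~to_clear;
		assert(child == 0xc);		/* 1100 */
		return 0;
	}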
include/asm-ia64/machvec.h
...
@@ -210,6 +210,7 @@ extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
 extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
 extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
 extern ia64_mv_pci_dma_address swiotlb_dma_address;
+extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;

 /*
  * Define default versions so we can extend machvec for new platforms without having
...
include/asm-ia64/machvec_init.h
...
@@ -16,6 +16,7 @@ extern ia64_mv_inl_t __ia64_inl;
 extern ia64_mv_outb_t __ia64_outb;
 extern ia64_mv_outw_t __ia64_outw;
 extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiob_t __ia64_mmiob;

 #define MACHVEC_HELPER(name) \
  struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
...
include/asm-ia64/mmu_context.h
...
@@ -2,8 +2,8 @@
 #define _ASM_IA64_MMU_CONTEXT_H

 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

 /*
...
@@ -13,8 +13,6 @@
  * consider the region number when performing a TLB lookup, we need to assign a unique
  * region id to each region in a process. We use the least significant three bits in a
  * region id for this purpose.
- *
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
 */

 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
...
@@ -23,6 +21,8 @@
 # ifndef __ASSEMBLY__

+#include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
...
@@ -36,6 +36,7 @@ struct ia64_ctx {
 };

 extern struct ia64_ctx ia64_ctx;
+extern u8 ia64_need_tlb_flush __per_cpu_data;

 extern void wrap_mmu_context (struct mm_struct *mm);
...
@@ -44,6 +45,23 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }

+/*
+ * When the context counter wraps around all TLBs need to be flushed because an old
+ * context number might have been reused. This is signalled by the ia64_need_tlb_flush
+ * per-CPU variable, which is checked in the routine below. Called by activate_mm().
+ * <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void __flush_tlb_all (void);
+
+	if (unlikely(ia64_need_tlb_flush)) {
+		__flush_tlb_all();
+		ia64_need_tlb_flush = 0;
+	}
+}
+
 static inline void
 get_new_mmu_context (struct mm_struct *mm)
 {
...
@@ -54,7 +72,6 @@ get_new_mmu_context (struct mm_struct *mm)
 		mm->context = ia64_ctx.next++;
 	}
 	spin_unlock(&ia64_ctx.lock);
-
 }

 static inline void
...
@@ -109,6 +126,8 @@ reload_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
+	delayed_tlb_flush();
+
 	/*
 	 * We may get interrupts here, but that's OK because interrupt
 	 * handlers cannot touch user-space.
...
include/asm-ia64/offsets.h
deleted
100644 → 0
View file @
12ebbff8
#ifndef _ASM_IA64_OFFSETS_H
#define _ASM_IA64_OFFSETS_H
/*
* DO NOT MODIFY
*
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define IA64_TASK_SIZE 3952
/* 0xf70 */
#define IA64_THREAD_INFO_SIZE 32
/* 0x20 */
#define IA64_PT_REGS_SIZE 400
/* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560
/* 0x230 */
#define IA64_SIGINFO_SIZE 128
/* 0x80 */
#define IA64_CPU_SIZE 224
/* 0xe0 */
#define SIGFRAME_SIZE 2816
/* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448
/* 0x1c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1496
/* 0x5d8 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0
/* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8	/* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16	/* 0x10 */
#define IA64_PT_REGS_AR_UNAT_OFFSET 24	/* 0x18 */
#define IA64_PT_REGS_AR_PFS_OFFSET 32	/* 0x20 */
#define IA64_PT_REGS_AR_RSC_OFFSET 40	/* 0x28 */
#define IA64_PT_REGS_AR_RNAT_OFFSET 48	/* 0x30 */
#define IA64_PT_REGS_AR_BSPSTORE_OFFSET 56	/* 0x38 */
#define IA64_PT_REGS_PR_OFFSET 64	/* 0x40 */
#define IA64_PT_REGS_B6_OFFSET 72	/* 0x48 */
#define IA64_PT_REGS_LOADRS_OFFSET 80	/* 0x50 */
#define IA64_PT_REGS_R1_OFFSET 88	/* 0x58 */
#define IA64_PT_REGS_R2_OFFSET 96	/* 0x60 */
#define IA64_PT_REGS_R3_OFFSET 104	/* 0x68 */
#define IA64_PT_REGS_R12_OFFSET 112	/* 0x70 */
#define IA64_PT_REGS_R13_OFFSET 120	/* 0x78 */
#define IA64_PT_REGS_R14_OFFSET 128	/* 0x80 */
#define IA64_PT_REGS_R15_OFFSET 136	/* 0x88 */
#define IA64_PT_REGS_R8_OFFSET 144	/* 0x90 */
#define IA64_PT_REGS_R9_OFFSET 152	/* 0x98 */
#define IA64_PT_REGS_R10_OFFSET 160	/* 0xa0 */
#define IA64_PT_REGS_R11_OFFSET 168	/* 0xa8 */
#define IA64_PT_REGS_R16_OFFSET 176	/* 0xb0 */
#define IA64_PT_REGS_R17_OFFSET 184	/* 0xb8 */
#define IA64_PT_REGS_R18_OFFSET 192	/* 0xc0 */
#define IA64_PT_REGS_R19_OFFSET 200	/* 0xc8 */
#define IA64_PT_REGS_R20_OFFSET 208	/* 0xd0 */
#define IA64_PT_REGS_R21_OFFSET 216	/* 0xd8 */
#define IA64_PT_REGS_R22_OFFSET 224	/* 0xe0 */
#define IA64_PT_REGS_R23_OFFSET 232	/* 0xe8 */
#define IA64_PT_REGS_R24_OFFSET 240	/* 0xf0 */
#define IA64_PT_REGS_R25_OFFSET 248	/* 0xf8 */
#define IA64_PT_REGS_R26_OFFSET 256	/* 0x100 */
#define IA64_PT_REGS_R27_OFFSET 264	/* 0x108 */
#define IA64_PT_REGS_R28_OFFSET 272	/* 0x110 */
#define IA64_PT_REGS_R29_OFFSET 280	/* 0x118 */
#define IA64_PT_REGS_R30_OFFSET 288	/* 0x120 */
#define IA64_PT_REGS_R31_OFFSET 296	/* 0x128 */
#define IA64_PT_REGS_AR_CCV_OFFSET 304	/* 0x130 */
#define IA64_PT_REGS_AR_FPSR_OFFSET 312	/* 0x138 */
#define IA64_PT_REGS_B0_OFFSET 320	/* 0x140 */
#define IA64_PT_REGS_B7_OFFSET 328	/* 0x148 */
#define IA64_PT_REGS_F6_OFFSET 336	/* 0x150 */
#define IA64_PT_REGS_F7_OFFSET 352	/* 0x160 */
#define IA64_PT_REGS_F8_OFFSET 368	/* 0x170 */
#define IA64_PT_REGS_F9_OFFSET 384	/* 0x180 */
#define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0	/* 0x0 */
#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8	/* 0x8 */
#define IA64_SWITCH_STACK_F2_OFFSET 16	/* 0x10 */
#define IA64_SWITCH_STACK_F3_OFFSET 32	/* 0x20 */
#define IA64_SWITCH_STACK_F4_OFFSET 48	/* 0x30 */
#define IA64_SWITCH_STACK_F5_OFFSET 64	/* 0x40 */
#define IA64_SWITCH_STACK_F10_OFFSET 80	/* 0x50 */
#define IA64_SWITCH_STACK_F11_OFFSET 96	/* 0x60 */
#define IA64_SWITCH_STACK_F12_OFFSET 112	/* 0x70 */
#define IA64_SWITCH_STACK_F13_OFFSET 128	/* 0x80 */
#define IA64_SWITCH_STACK_F14_OFFSET 144	/* 0x90 */
#define IA64_SWITCH_STACK_F15_OFFSET 160	/* 0xa0 */
#define IA64_SWITCH_STACK_F16_OFFSET 176	/* 0xb0 */
#define IA64_SWITCH_STACK_F17_OFFSET 192	/* 0xc0 */
#define IA64_SWITCH_STACK_F18_OFFSET 208	/* 0xd0 */
#define IA64_SWITCH_STACK_F19_OFFSET 224	/* 0xe0 */
#define IA64_SWITCH_STACK_F20_OFFSET 240	/* 0xf0 */
#define IA64_SWITCH_STACK_F21_OFFSET 256	/* 0x100 */
#define IA64_SWITCH_STACK_F22_OFFSET 272	/* 0x110 */
#define IA64_SWITCH_STACK_F23_OFFSET 288	/* 0x120 */
#define IA64_SWITCH_STACK_F24_OFFSET 304	/* 0x130 */
#define IA64_SWITCH_STACK_F25_OFFSET 320	/* 0x140 */
#define IA64_SWITCH_STACK_F26_OFFSET 336	/* 0x150 */
#define IA64_SWITCH_STACK_F27_OFFSET 352	/* 0x160 */
#define IA64_SWITCH_STACK_F28_OFFSET 368	/* 0x170 */
#define IA64_SWITCH_STACK_F29_OFFSET 384	/* 0x180 */
#define IA64_SWITCH_STACK_F30_OFFSET 400	/* 0x190 */
#define IA64_SWITCH_STACK_F31_OFFSET 416	/* 0x1a0 */
#define IA64_SWITCH_STACK_R4_OFFSET 432	/* 0x1b0 */
#define IA64_SWITCH_STACK_R5_OFFSET 440	/* 0x1b8 */
#define IA64_SWITCH_STACK_R6_OFFSET 448	/* 0x1c0 */
#define IA64_SWITCH_STACK_R7_OFFSET 456	/* 0x1c8 */
#define IA64_SWITCH_STACK_B0_OFFSET 464	/* 0x1d0 */
#define IA64_SWITCH_STACK_B1_OFFSET 472	/* 0x1d8 */
#define IA64_SWITCH_STACK_B2_OFFSET 480	/* 0x1e0 */
#define IA64_SWITCH_STACK_B3_OFFSET 488	/* 0x1e8 */
#define IA64_SWITCH_STACK_B4_OFFSET 496	/* 0x1f0 */
#define IA64_SWITCH_STACK_B5_OFFSET 504	/* 0x1f8 */
#define IA64_SWITCH_STACK_AR_PFS_OFFSET 512	/* 0x200 */
#define IA64_SWITCH_STACK_AR_LC_OFFSET 520	/* 0x208 */
#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528	/* 0x210 */
#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536	/* 0x218 */
#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544	/* 0x220 */
#define IA64_SWITCH_STACK_PR_OFFSET 552	/* 0x228 */
#define IA64_SIGCONTEXT_IP_OFFSET 40	/* 0x28 */
#define IA64_SIGCONTEXT_AR_BSP_OFFSET 72	/* 0x48 */
#define IA64_SIGCONTEXT_AR_FPSR_OFFSET 104	/* 0x68 */
#define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80	/* 0x50 */
#define IA64_SIGCONTEXT_AR_UNAT_OFFSET 96	/* 0x60 */
#define IA64_SIGCONTEXT_B0_OFFSET 136	/* 0x88 */
#define IA64_SIGCONTEXT_CFM_OFFSET 48	/* 0x30 */
#define IA64_SIGCONTEXT_FLAGS_OFFSET 0	/* 0x0 */
#define IA64_SIGCONTEXT_FR6_OFFSET 560	/* 0x230 */
#define IA64_SIGCONTEXT_PR_OFFSET 128	/* 0x80 */
#define IA64_SIGCONTEXT_R12_OFFSET 296	/* 0x128 */
#define IA64_SIGCONTEXT_RBS_BASE_OFFSET 2512	/* 0x9d0 */
#define IA64_SIGCONTEXT_LOADRS_OFFSET 2520	/* 0x9d8 */
#define IA64_SIGFRAME_ARG0_OFFSET 0	/* 0x0 */
#define IA64_SIGFRAME_ARG1_OFFSET 8	/* 0x8 */
#define IA64_SIGFRAME_ARG2_OFFSET 16	/* 0x10 */
#define IA64_SIGFRAME_HANDLER_OFFSET 24	/* 0x18 */
#define IA64_SIGFRAME_SIGCONTEXT_OFFSET 160	/* 0xa0 */
#define IA64_CLONE_VFORK 16384	/* 0x4000 */
#define IA64_CLONE_VM 256	/* 0x100 */
#endif /* _ASM_IA64_OFFSETS_H */
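These constants exist so that hand-written assembly (entry code, MCA handlers, and so on) can index into struct pt_regs, struct switch_stack, and the signal frames without duplicating the C layout. A minimal sketch of how such constants are typically kept in sync with the C structures: a small generator program prints offsetof() values whose output becomes the header. Here pt_regs_demo is a hypothetical stand-in, not the kernel's actual struct pt_regs:

#include <stdio.h>
#include <stddef.h>

struct pt_regs_demo {			/* hypothetical stand-in layout */
	unsigned long cr_ipsr;		/* assumed field at offset 0x0 */
	unsigned long cr_iip;		/* 0x8 */
	unsigned long cr_ifs;		/* 0x10 */
};

int main(void)
{
	/* emit one #define per field, in the same style as the header above */
	printf("#define IA64_PT_REGS_CR_IIP_OFFSET %lu\t/* 0x%lx */\n",
	       (unsigned long) offsetof(struct pt_regs_demo, cr_iip),
	       (unsigned long) offsetof(struct pt_regs_demo, cr_iip));
	printf("#define IA64_PT_REGS_CR_IFS_OFFSET %lu\t/* 0x%lx */\n",
	       (unsigned long) offsetof(struct pt_regs_demo, cr_ifs),
	       (unsigned long) offsetof(struct pt_regs_demo, cr_ifs));
	return 0;
}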
include/asm-ia64/pci.h
@@ -90,7 +90,7 @@ pcibios_penalize_isa_irq (int irq)
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_controller_num(PDEV)	(0)
-#define sg_dma_len(sg)		((sg)->length)
+#define sg_dma_len(sg)		((sg)->dma_length)
 #define HAVE_PCI_MMAP
 extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
...
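The sg_dma_len() change pairs with the scatterlist change further down: once an IOMMU may coalesce entries at mapping time, the DMA-visible length of an entry (dma_length) can differ from its CPU-side length, so drivers must read the former after mapping. A sketch of the consumer pattern under these 2.5-era headers; program_dma_descriptor() is a hypothetical device-specific helper:

#include <linux/pci.h>

/* hypothetical helper that writes one hardware DMA descriptor */
extern void program_dma_descriptor(int idx, dma_addr_t addr, unsigned int len);

static void setup_dma_demo(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	/* pci_map_sg() may return fewer entries than nents if the IOMMU merged some */
	int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < mapped; i++)
		program_dma_descriptor(i, sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
}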
include/asm-ia64/perfmon.h
@@ -172,9 +172,8 @@ extern int pfm_use_debug_registers(struct task_struct *);
 extern int pfm_release_debug_registers (struct task_struct *);
 extern int pfm_cleanup_smpl_buf (struct task_struct *);
 extern void pfm_syst_wide_update_task (struct task_struct *, int);
 extern void pfm_ovfl_block_reset (void);
-extern int pfm_syst_wide;
+extern void perfmon_init_percpu (void);
 #endif /* __KERNEL__ */
...
include/asm-ia64/processor.h
@@ -270,12 +270,8 @@ struct thread_struct {
 #define start_thread(regs,new_ip,new_sp) do {					\
	set_fs(USER_DS);							\
-	ia64_psr(regs)->dfh = 1;	/* disable fph */			\
-	ia64_psr(regs)->mfh = 0;	/* clear mfh */				\
-	ia64_psr(regs)->cpl = 3;	/* set user mode */			\
-	ia64_psr(regs)->ri = 0;		/* clear return slot number */		\
-	ia64_psr(regs)->is = 0;		/* IA-64 instruction set */		\
-	ia64_psr(regs)->sp = 1;		/* enforce secure perfmon */		\
+	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP)) \
+			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
	regs->cr_iip = new_ip;							\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */	\
	regs->ar_rnat = 0;							\
...
@@ -284,7 +280,7 @@ struct thread_struct {
	regs->loadrs = 0;							\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */	\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */	\
-	if (!likely (current->mm->dumpable)) {					\
+	if (unlikely(!current->mm->dumpable)) {					\
		/*								\
		 * Zap scratch regs to avoid leaking bits between processes with different \
		 * uid/privileges.						\
...
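The second hunk is a genuine fix rather than style: !likely(x) attaches the branch-prediction hint to x itself and negates the result outside the hint, while unlikely(!x) hints that the whole guard is rarely true. A minimal sketch with the standard <linux/compiler.h> definitions of this era:

#define likely(x)	__builtin_expect((x), 1)
#define unlikely(x)	__builtin_expect((x), 0)

static int demo(int dumpable)
{
	if (unlikely(!dumpable))	/* fixed form: branch predicted rarely taken */
		return 1;		/* the zap-scratch-registers path */
	return 0;
}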
include/asm-ia64/scatterlist.h
@@ -7,12 +7,12 @@
 */
 struct scatterlist {
-	char *orig_address;	/* for use by swiotlb */
-	/* These two are only valid if ADDRESS member of this struct is NULL. */
	struct page *page;
	unsigned int offset;
	unsigned int length;	/* buffer length */
+	dma_addr_t dma_address;
+	unsigned int dma_length;
 };
 #define ISA_DMA_THRESHOLD	(~0UL)
...
include/asm-ia64/softirq.h
@@ -8,6 +8,7 @@
 * David Mosberger-Tang <davidm@hpl.hp.com>
 */
 #include <asm/hardirq.h>
+#include <linux/compiler.h>
 #define __local_bh_enable()	do { barrier(); really_local_bh_count()--; } while (0)
...
include/asm-ia64/suspend.h (new file, mode 0 → 100644; no content shown)
include/asm-ia64/system.h
@@ -13,6 +13,7 @@
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
 #include <linux/config.h>
+#include <linux/percpu.h>
 #include <asm/kregs.h>
 #include <asm/page.h>
...
@@ -384,7 +385,8 @@ extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);
 #if defined(CONFIG_SMP) && defined(CONFIG_PERFMON)
-# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0)
+extern int __per_cpu_data pfm_syst_wide;
+# define PERFMON_IS_SYSWIDE() (this_cpu(pfm_syst_wide) != 0)
 #else
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
...
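The __per_cpu_data/this_cpu pair is the then-new 2.5 per-CPU data interface: each CPU gets its own copy of the variable, so CPU-local state like this flag needs no locking. A minimal userspace-style sketch of the idea only; NCPUS_DEMO, my_cpu_id(), and the _demo names are stand-ins for NR_CPUS, smp_processor_id(), and the real kernel symbols:

#define NCPUS_DEMO 4

static int pfm_syst_wide_demo[NCPUS_DEMO];	/* one flag per CPU */

static int my_cpu_id(void) { return 0; }	/* stand-in for smp_processor_id() */

#define THIS_CPU_DEMO(var) ((var)[my_cpu_id()])

static int perfmon_is_syswide_demo(void)
{
	/* each CPU reads and writes only its own slot */
	return THIS_CPU_DEMO(pfm_syst_wide_demo) != 0;
}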
include/asm-ia64/tlb.h
The merge effectively rewrites this header: the old stub (flagged "/* XXX fix me! */"), which defined the shootdown hooks as no-ops along with "#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)" and then included <asm-generic/tlb.h>, is replaced by a full ia64 implementation:

#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * This file was derived from asm-generic/tlb.h.
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
#include <linux/config.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0UL)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif

typedef struct {
	struct mm_struct	*mm;
	unsigned long		nr;		/* == ~0UL => fast mode */
	unsigned long		freed;		/* number of pages freed */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
} mmu_gather_t;

/* Users of the generic TLB shootdown code must declare this storage space. */
extern mmu_gather_t mmu_gathers[NR_CPUS];

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	unsigned long nr;

	if (unlikely(end - start >= 1024*1024*1024*1024UL
		     || rgn_index(start) != rgn_index(end - 1))) {
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}

/*
 * Return a pointer to an initialized mmu_gather_t.
 */
static inline mmu_gather_t *
tlb_gather_mmu (struct mm_struct *mm)
{
	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];

	tlb->mm = mm;
	tlb->freed = 0;
	tlb->start_addr = ~0UL;
	/* Use fast mode if only one CPU is online */
	tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
	return tlb;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	unsigned long freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	unsigned long rss = mm->rss;

	if (rss < freed)
		freed = rss;
	mm->rss = rss - freed;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t pte, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (mmu_gather_t *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)

#endif /* _ASM_IA64_TLB_H */
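A hypothetical caller, mirroring the template in the header comment above: how generic unmap code would drive this interface for the range [start, end) of address space mm. lookup_pte_demo() and pte_is_normal_demo() are invented helpers, and the real code also clears each PTE while holding the page-table lock; this is a sketch, not the real zap path:

#include <linux/mm.h>
#include <asm/tlb.h>

extern pte_t *lookup_pte_demo(struct mm_struct *mm, unsigned long addr);	/* hypothetical */
extern int pte_is_normal_demo(pte_t pte);					/* hypothetical */

static void unmap_range_demo(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	mmu_gather_t *tlb = tlb_gather_mmu(mm);		/* start the shootdown */
	unsigned long addr;

	tlb_start_vma(tlb, vma);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte = *lookup_pte_demo(mm, addr);
		if (pte_none(pte))
			continue;
		tlb_remove_tlb_entry(tlb, pte, addr);	/* record the range to flush */
		if (pte_is_normal_demo(pte))
			tlb_remove_page(tlb, pte_page(pte));	/* defer freeing until after the flush */
	}
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, start, end);	/* flush TLBs, then release gathered pages */
}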
include/asm-ia64/tlbflush.h
@@ -70,12 +70,10 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 static inline void
 flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	struct vm_area_struct vma;
-
-	if (REGION_NUMBER(start) != REGION_NUMBER(end))
-		printk("flush_tlb_pgtables: can't flush across regions!!\n");
-	vma.vm_mm = mm;
-	flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
+	/*
+	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
+	 * interface (see tlb.h).
+	 */
 }
 #define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
...
include/asm-ia64/unistd.h
@@ -223,6 +223,10 @@
 #define __NR_sched_setaffinity		1231
 #define __NR_sched_getaffinity		1232
 #define __NR_security			1233
+#define __NR_get_large_pages		1234
+#define __NR_free_large_pages		1235
+#define __NR_share_large_pages		1236
+#define __NR_unshare_large_pages	1237
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
...
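These four defines only reserve slots in the syscall table; the header says nothing about the calling convention. A hypothetical userland probe that invokes one of them by number via syscall(2); the empty argument list is an assumption, and on a kernel without the implementation the call simply fails with ENOSYS:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_get_large_pages
#define __NR_get_large_pages 1234	/* number from include/asm-ia64/unistd.h above */
#endif

int main(void)
{
	long ret = syscall(__NR_get_large_pages);	/* args omitted: convention not documented */
	if (ret < 0)
		perror("get_large_pages");
	else
		printf("get_large_pages returned %ld\n", ret);
	return 0;
}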