Kirill Smelkov / linux

Commit 8beb1642
Authored Jul 29, 2002 by David Mosberger

    ia64: Manual merge.

Parents: 12ebbff8 cb1a895f

Showing 59 changed files with 1899 additions and 901 deletions (+1899 / -901)
arch/ia64/Makefile                          +1    -1
arch/ia64/config.in                         +43   -71
arch/ia64/hp/common/sba_iommu.c             +156  -329
arch/ia64/hp/sim/hpsim_console.c            +6    -6
arch/ia64/hp/sim/hpsim_irq.c                +8    -8
arch/ia64/hp/sim/simserial.c                +1    -0
arch/ia64/hp/zx1/hpzx1_machvec.c            +0    -2
arch/ia64/ia32/binfmt_elf32.c               +1    -1
arch/ia64/kernel/acpi.c                     +64   -57
arch/ia64/kernel/efi.c                      +123  -24
arch/ia64/kernel/init_task.c                +2    -2
arch/ia64/kernel/iosapic.c                  +21   -22
arch/ia64/kernel/irq_ia64.c                 +10   -3
arch/ia64/kernel/irq_lsapic.c               +8    -8
arch/ia64/kernel/machvec.c                  +6    -3
arch/ia64/kernel/mca.c                      +12   -12
arch/ia64/kernel/mca_asm.S                  +2    -2
arch/ia64/kernel/pci.c                      +26   -1
arch/ia64/kernel/perfmon.c                  +61   -56
arch/ia64/kernel/perfmon_itanium.h          +99   -0
arch/ia64/kernel/perfmon_mckinley.h         +134  -0
arch/ia64/kernel/process.c                  +5    -0
arch/ia64/kernel/setup.c                    +16   -5
arch/ia64/kernel/signal.c                   +1    -0
arch/ia64/kernel/smpboot.c                  +2    -2
arch/ia64/kernel/time.c                     +15   -14
arch/ia64/kernel/traps.c                    +32   -9
arch/ia64/kernel/unwind.c                   +10   -9
arch/ia64/lib/Makefile                      +4    -4
arch/ia64/lib/copy_user.S                   +5    -3
arch/ia64/lib/io.c                          +9    -0
arch/ia64/lib/memcpy_mck.S                  +674  -0
arch/ia64/lib/swiotlb.c                     +15   -15
arch/ia64/mm/init.c                         +17   -4
arch/ia64/mm/tlb.c                          +10   -5
arch/ia64/sn/io/ifconfig_net.c              +3    -3
arch/ia64/sn/io/pciba.c                     +14   -14
arch/ia64/sn/io/sn1/hubcounters.c           +1    -1
arch/ia64/sn/io/sn1/pcibr.c                 +16   -16
arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c       +16   -16
arch/ia64/sn/kernel/setup.c                 +11   -11
include/asm-ia64/bitops.h                   +1    -1
include/asm-ia64/delay.h                    +1    -1
include/asm-ia64/keyboard.h                 +2    -0
include/asm-ia64/kregs.h                    +9    -0
include/asm-ia64/machvec.h                  +1    -0
include/asm-ia64/machvec_init.h             +1    -0
include/asm-ia64/mmu_context.h              +24   -5
include/asm-ia64/offsets.h                  +0    -130
include/asm-ia64/pci.h                      +1    -1
include/asm-ia64/perfmon.h                  +2    -3
include/asm-ia64/processor.h                +3    -7
include/asm-ia64/scatterlist.h              +3    -3
include/asm-ia64/softirq.h                  +1    -0
include/asm-ia64/suspend.h                  +0    -0
include/asm-ia64/system.h                   +3    -1
include/asm-ia64/tlb.h                      +179  -4
include/asm-ia64/tlbflush.h                 +4    -6
include/asm-ia64/unistd.h                   +4    -0
arch/ia64/Makefile
@@ -26,7 +26,7 @@ CFLAGS_KERNEL := -mconstant-gp
 GCC_VERSION = $(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d ' ' | cut -f1 -d '.')
 ifneq ($(GCC_VERSION),2)
-CFLAGS += -frename-registers --param max-inline-insns=2000
+CFLAGS += -frename-registers --param max-inline-insns=5000
 endif
 ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
arch/ia64/config.in
(inline view; removed/added lines shown in order, markers not recoverable from this page)
@@ -64,12 +64,13 @@ if [ "$CONFIG_MCKINLEY" = "y" ]; then
	fi
fi
if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ]; then
if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
then
	bool '  Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
	define_bool CONFIG_PM y
fi
if [ "$CONFIG_IA64_SGI_SN1" = "y" -o
     "$CONFIG_IA64_SGI_SN2" = "y" ]; then
if [ "$CONFIG_IA64_SGI_SN1" = "y" -o "$CONFIG_IA64_SGI_SN2" = "y" ]; then
	define_bool CONFIG_IA64_SGI_SN y
	bool '  Enable extra debugging code' CONFIG_IA64_SGI_SN_DEBUG n
	bool '  Enable SGI Medusa Simulator Support' CONFIG_IA64_SGI_SN_SIM
@@ -99,21 +100,21 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
	source drivers/acpi/Config.in
	source drivers/acpi/Config.in
	bool 'PCI support' CONFIG_PCI
	source drivers/pci/Config.in
	bool 'PCI support' CONFIG_PCI
	source drivers/pci/Config.in
	bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
	if [ "$CONFIG_HOTPLUG" = "y" ]; then
		source drivers/pcmcia/Config.in
	else
		define_bool CONFIG_PCMCIA n
	fi
	source drivers/parport/Config.in
	bool 'Support for hot-pluggable devices' CONFIG_HOTPLUG
	if [ "$CONFIG_HOTPLUG" = "y" ]; then
		source drivers/hotplug/Config.in
		source drivers/pcmcia/Config.in
	else
		define_bool CONFIG_PCMCIA n
	fi
	source drivers/parport/Config.in
fi # !HP_SIM
endmenu
@@ -124,38 +125,17 @@ fi
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
	source drivers/mtd/Config.in
	source drivers/pnp/Config.in
	source drivers/block/Config.in
	source drivers/ieee1394/Config.in
	source drivers/message/i2o/Config.in
	source drivers/md/Config.in
	source drivers/message/fusion/Config.in
	mainmenu_option next_comment
	comment 'ATA/ATAPI/MFM/RLL support'
	tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
	if [ "$CONFIG_IDE" != "n" ]; then
		source drivers/ide/Config.in
	else
		define_bool CONFIG_BLK_DEV_HD n
	fi
	endmenu
else # ! HP_SIM
	mainmenu_option next_comment
	comment 'Block devices'
	tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
	dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
	tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
	if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
		int '  Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
	fi
	endmenu
fi # !HP_SIM
mainmenu_option next_comment
comment 'SCSI support'
@@ -168,31 +148,26 @@ fi
endmenu
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
	if [ "$CONFIG_NET" = "y" ]; then
		mainmenu_option next_comment
		comment 'Network device support'
		bool 'Network device support' CONFIG_NETDEVICES
		if [ "$CONFIG_NETDEVICES" = "y" ]; then
			source drivers/net/Config.in
		fi
		endmenu
	if [ "$CONFIG_NET" = "y" ]; then
		mainmenu_option next_comment
		comment 'Network device support'
		bool 'Network device support' CONFIG_NETDEVICES
		if [ "$CONFIG_NETDEVICES" = "y" ]; then
			source drivers/net/Config.in
		fi
		endmenu
	fi
	source net/ax25/Config.in
	source drivers/isdn/Config.in
	mainmenu_option next_comment
	comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
	bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
	if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
		source drivers/cdrom/Config.in
	fi
	endmenu
fi
	bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
fi # !HP_SIM
#
@@ -220,21 +195,18 @@ fi
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
	mainmenu_option next_comment
	comment 'Sound'
	tristate 'Sound card support' CONFIG_SOUND
	if [ "$CONFIG_SOUND" != "n" ]; then
		source sound/Config.in
	fi
	endmenu
	source drivers/usb/Config.in
	source lib/Config.in
	mainmenu_option next_comment
	comment 'Sound'
	source net/bluetooth/Config.in
	tristate 'Sound card support' CONFIG_SOUND
	if [ "$CONFIG_SOUND" != "n" ]; then
		source sound/Config.in
	fi
	endmenu
	source drivers/usb/Config.in
	source lib/Config.in
	source net/bluetooth/Config.in
fi # !HP_SIM
if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
arch/ia64/hp/common/sba_iommu.c
@@ -2,6 +2,7 @@
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
 ** (c) Copyright 2002 Alex Williamson
+** (c) Copyright 2002 Grant Grundler
 ** (c) Copyright 2002 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
@@ -110,7 +111,7 @@
 */
 #define DELAYED_RESOURCE_CNT	16
-#define DEFAULT_DMA_HINT_REG	0
+#define DEFAULT_DMA_HINT_REG(d)	0
 #define ZX1_FUNC_ID_VALUE    ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
 #define ZX1_MC_ID    ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)
@@ -216,9 +217,10 @@ static int sba_count;
 static int reserve_sba_gart = 1;
 static struct pci_dev sac_only_dev;
-#define sba_sg_iova(sg)		(sg->address)
+#define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)
 #define sba_sg_len(sg)		(sg->length)
-#define sba_sg_buffer(sg)	(sg->orig_address)
+#define sba_sg_iova(sg)		(sg->dma_address)
+#define sba_sg_iova_len(sg)	(sg->dma_length)
 /* REVISIT - fix me for multiple SBAs/IOCs */
 #define GET_IOC(dev)	(sba_list->ioc)
@@ -232,7 +234,7 @@ static struct pci_dev sac_only_dev;
 ** rather than the HW. I/O MMU allocation alogorithms can be
 ** faster with smaller size is (to some degree).
 */
-#define DMA_CHUNK_SIZE	(BITS_PER_LONG*PAGE_SIZE)
+#define DMA_CHUNK_SIZE	(BITS_PER_LONG*IOVP_SIZE)
 /* Looks nice and keeps the compiler happy */
 #define SBA_DEV(d) ((struct sba_device *) (d))
@@ -255,7 +257,7 @@ static struct pci_dev sac_only_dev;
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
- * Print the size/location of the IO MMU PDIR.
+ * Print the size/location of the IO MMU Pdir.
 */
 static void
 sba_dump_tlb(char *hpa)
@@ -273,12 +275,12 @@ sba_dump_tlb(char *hpa)
 #ifdef ASSERT_PDIR_SANITY
 /**
- * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
+ * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 * @pide: pdir index.
 *
- * Print one entry of the IO MMU PDIR in human readable form.
+ * Print one entry of the IO MMU Pdir in human readable form.
 */
 static void
 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
@@ -360,25 +362,25 @@ sba_check_pdir(struct ioc *ioc, char *msg)
 * print the SG list so we can verify it's correct by hand.
 */
 static void
-sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
+sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
	while (nents-- > 0) {
-		printk(" %d : %08lx/%05x %p\n",
+		printk(" %d : DMA %08lx/%05x CPU %p\n",
			nents,
			(unsigned long) sba_sg_iova(startsg),
-			sba_sg_len(startsg),
-			sba_sg_buffer(startsg));
+			sba_sg_iova_len(startsg),
+			sba_sg_address(startsg));
		startsg++;
	}
 }
 static void
-sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
+sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;
	while (the_nents-- > 0) {
-		if (sba_sg_buffer(the_sg) == 0x0UL)
+		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg++;
	}
@@ -404,7 +406,6 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
 #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))
-/* FIXME : review these macros to verify correctness and usage */
 #define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)
 #define RESMAP_MASK(n)    ~(~0UL << (n))
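As a reading aid for the SBA_IOVA/SBA_IOVP macros in the hunk above: an I/O virtual address is just the IOVA window base OR'd with the pdir offset, the in-page offset, and a hint field, and SBA_IOVP strips those back off. A minimal user-space sketch of that composition; the window base and hint position below are illustrative assumptions, not the real zx1 values taken from ioc->ibase / ioc->hint_shift_pdir:

#include <stdio.h>

/* Illustrative stand-ins for ioc->ibase and the hint field (not real hardware values). */
#define IBASE            0x80000000UL
#define HINT_SHIFT_PDIR  28
#define HINT_MASK_PDIR   (~(0x3UL << HINT_SHIFT_PDIR))

/* Same shape as the kernel macros shown above. */
#define SBA_IOVA(iovp, offset, hint)  (IBASE | (iovp) | (offset) | ((unsigned long)(hint) << HINT_SHIFT_PDIR))
#define SBA_IOVP(iova)                (((iova) & HINT_MASK_PDIR) & ~IBASE)

int main(void)
{
	unsigned long iovp = 0x5000, offset = 0x123;
	unsigned long iova = SBA_IOVA(iovp, offset, 0);

	/* Recover the pdir-relative part by masking off hint bits and the window base. */
	printf("iova=0x%lx  iovp|offset=0x%lx\n", iova, SBA_IOVP(iova));
	return 0;
}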
@@ -412,7 +413,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 /**
- * sba_search_bitmap - find free space in IO PDIR resource bitmap
+ * sba_search_bitmap - find free space in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
@@ -449,7 +450,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
-		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
+		unsigned long o = 1 << get_order(bits_wanted << IOVP_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;
@@ -495,7 +496,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 /**
- * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
+ * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
@@ -557,7 +558,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 /**
- * sba_free_range - unmark bits in IO PDIR resource bitmap
+ * sba_free_range - unmark bits in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
@@ -604,14 +605,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 /**
- * sba_io_pdir_entry - fill in one IO PDIR entry
- * @pdir_ptr:  pointer to IO PDIR entry
- * @vba: Virtual CPU address of buffer to map
+ * sba_io_pdir_entry - fill in one IO Pdir entry
+ * @pdir_ptr:  pointer to IO Pdir entry
+ * @phys_page: phys CPU address of page to map
 *
 * SBA Mapping Routine
 *
- * Given a virtual address (vba, arg1) sba_io_pdir_entry()
- * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
+ * Given a physical address (phys_page, arg1) sba_io_pdir_entry()
+ * loads the I/O Pdir entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
@@ -623,20 +624,12 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 * V  == Valid Bit
 * U  == Unused
 * PPN == Physical Page Number
 *
- * The physical address fields are filled with the results of virt_to_phys()
- * on the vba.
 */
-#if 1
-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL)
-#else
-void SBA_INLINE
-sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
-{
-	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
-}
-#endif
+#define SBA_VALID_MASK	0x80000000000000FFULL
+#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK)
+#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK)
 #ifdef ENABLE_MARK_CLEAN
 /**
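The new Pdir-entry helpers reduce to a single OR/AND-mask pair: sba_io_pdir_entry() stores the physical page with the valid/control bits set, sba_io_page() masks them back off. A small user-space sketch of that round trip, assuming a page-aligned physical address (the macros are copied from the hunk above; the address value is made up):

#include <stdint.h>
#include <stdio.h>

#define SBA_VALID_MASK 0x80000000000000FFULL
#define sba_io_pdir_entry(pdir_ptr, phys_page) (*(pdir_ptr) = ((phys_page) | SBA_VALID_MASK))
#define sba_io_page(pdir_ptr)                  (*(pdir_ptr) & ~SBA_VALID_MASK)

int main(void)
{
	uint64_t pdir_entry = 0;
	uint64_t phys_page = 0x123456000ULL;		/* assumed page-aligned physical address */

	sba_io_pdir_entry(&pdir_entry, phys_page);	/* set valid bit + low control bits */
	printf("entry=0x%016llx  page=0x%llx\n",
	       (unsigned long long) pdir_entry,
	       (unsigned long long) sba_io_page(&pdir_entry));
	return 0;
}

Because the mask also clears the low byte, the round trip only recovers the page address when the input is page-aligned, which is exactly how the driver uses it.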
@@ -660,12 +653,12 @@ mark_clean (void *addr, size_t size)
 #endif
 /**
- * sba_mark_invalid - invalidate one or more IO PDIR entries
+ * sba_mark_invalid - invalidate one or more IO Pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
- * Marking the IO PDIR entry(ies) as Invalid and invalidate
+ * Marking the IO Pdir entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
@@ -700,14 +693,14 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */
		/*
-		** clear I/O PDIR entry "valid" bit
+		** clear I/O Pdir entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
-		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+		ioc->pdir_base[off] &= ~SBA_VALID_MASK;
	} else {
-		u32 t = get_order(byte_cnt) + PAGE_SHIFT;
+		u32 t = get_order(byte_cnt) + IOVP_SHIFT;
		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */
@@ -716,7 +709,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
			/* clear I/O Pdir entry "valid" bit first */
-			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+			ioc->pdir_base[off] &= ~SBA_VALID_MASK;
			off++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
@@ -744,7 +737,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
	u64 *pdir_start;
	int pide;
 #ifdef ALLOW_IOV_BYPASS
-	unsigned long pci_addr = virt_to_phys(addr);
+	unsigned long phys_addr = virt_to_phys(addr);
 #endif
	ioc = GET_IOC(dev);
@@ -754,7 +747,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
-	if ((pci_addr & ~dev->dma_mask) == 0) {
+	if ((phys_addr & ~dev->dma_mask) == 0) {
		/*
		** Device is bit capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
@@ -765,8 +758,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-			   dev->dma_mask, pci_addr);
-		return pci_addr;
+			   dev->dma_mask, phys_addr);
+		return phys_addr;
	}
 #endif
@@ -799,7 +792,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
-		sba_io_pdir_entry(pdir_start, (unsigned long) addr);
+		sba_io_pdir_entry(pdir_start, virt_to_phys(addr));
		DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
@@ -812,7 +806,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
	sba_check_pdir(ioc,"Check after sba_map_single()");
 #endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
-	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
+	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction));
 }
 /**
@@ -866,6 +860,29 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);
+#ifdef ENABLE_MARK_CLEAN
+	/*
+	** Don't need to hold the spinlock while telling VM pages are "clean".
+	** The pages are "busy" in the resource map until we mark them free.
+	** But tell VM pages are clean *before* releasing the resource
+	** in order to avoid race conditions.
+	*/
+	if (direction == PCI_DMA_FROMDEVICE) {
+		u32 iovp = (u32) SBA_IOVP(ioc, iova);
+		unsigned int pide = PDIR_INDEX(iovp);
+		u64 *pdirp = &(ioc->pdir_base[pide]);
+		size_t byte_cnt = size;
+		void *addr;
+
+		do {
+			addr = phys_to_virt(sba_io_page(pdirp));
+			mark_clean(addr, min(byte_cnt, IOVP_SIZE));
+			pdirp++;
+			byte_cnt -= IOVP_SIZE;
+		} while (byte_cnt > 0);
+	}
+#endif
	spin_lock_irqsave(&ioc->res_lock, flags);
 #ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
@@ -891,40 +908,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (direction == PCI_DMA_FROMDEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc, iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= IOVP_SIZE) {
-			addr = phys_to_virt(ioc->pdir_base[off] & ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] & ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, IOVP_SIZE));
-				off++;
-				byte_cnt -= IOVP_SIZE;
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases. May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
 }
@@ -980,242 +964,109 @@ void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
 }
-/*
-** Since 0 is a valid pdir_base index value, can't use that
-** to determine if a value is valid or not. Use a flag to indicate
-** the SG list entry contains a valid pdir index.
-*/
-#define PIDE_FLAG 0x1UL
 #ifdef DEBUG_LARGE_SG_ENTRIES
 int dump_run_sg = 0;
 #endif
-/**
- * sba_fill_pdir - write allocated SG entries into IO PDIR
- * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg:  list of IOVA/size pairs
- * @nents: number of entries in startsg list
- *
- * Take preprocessed SG list and write corresponding entries
- * in the IO PDIR.
- */
-static SBA_INLINE int
-sba_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents)
-{
-	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
-	int n_mappings = 0;
-	u64 *pdirp = 0;
-	unsigned long dma_offset = 0;
-
-	dma_sg--;
-	while (nents-- > 0) {
-		int cnt = sba_sg_len(startsg);
-		sba_sg_len(startsg) = 0;
-
-#ifdef DEBUG_LARGE_SG_ENTRIES
-		if (dump_run_sg)
-			printk(" %2d : %08lx/%05x %p\n",
-				nents, (unsigned long) sba_sg_iova(startsg), cnt,
-				sba_sg_buffer(startsg));
-#else
-		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
-				nents, (unsigned long) sba_sg_iova(startsg), cnt,
-				sba_sg_buffer(startsg));
-#endif
-		/*
-		** Look for the start of a new DMA stream
-		*/
-		if ((u64)sba_sg_iova(startsg) & PIDE_FLAG) {
-			u32 pide = (u64)sba_sg_iova(startsg) & ~PIDE_FLAG;
-			dma_offset = (unsigned long) pide & ~IOVP_MASK;
-			sba_sg_iova(startsg) = 0;
-			dma_sg++;
-			sba_sg_iova(dma_sg) = (char *)(pide | ioc->ibase);
-			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
-			n_mappings++;
-		}
-
-		/*
-		** Look for a VCONTIG chunk
-		*/
-		if (cnt) {
-			unsigned long vaddr = (unsigned long) sba_sg_buffer(startsg);
-			ASSERT(pdirp);
-
-			/* Since multiple Vcontig blocks could make up
-			** one DMA stream, *add* cnt to dma_len.
-			*/
-			sba_sg_len(dma_sg) += cnt;
-			cnt += dma_offset;
-			dma_offset = 0;	/* only want offset on first chunk */
-			cnt = ROUNDUP(cnt, IOVP_SIZE);
-#ifdef CONFIG_PROC_FS
-			ioc->msg_pages += cnt >> IOVP_SHIFT;
-#endif
-			do {
-				sba_io_pdir_entry(pdirp, vaddr);
-				vaddr += IOVP_SIZE;
-				cnt -= IOVP_SIZE;
-				pdirp++;
-			} while (cnt > 0);
-		}
-		startsg++;
-	}
-#ifdef DEBUG_LARGE_SG_ENTRIES
-	dump_run_sg = 0;
-#endif
-	return(n_mappings);
-}
-
-/*
-** Two address ranges are DMA contiguous *iff* "end of prev" and
-** "start of next" are both on a page boundry.
-**
-** (shift left is a quick trick to mask off upper bits)
-*/
-#define DMA_CONTIG(__X, __Y) \
-	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
+#define SG_ENT_VIRT_PAGE(sg)	page_address((sg)->page)
+#define SG_ENT_PHYS_PAGE(SG)	virt_to_phys(SG_ENT_VIRT_PAGE(SG))
 /**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg:  list of IOVA/size pairs
+ * @startsg:  input=SG list	output=DMA addr/len pairs filled in
 * @nents: number of entries in startsg list
+ * @direction: R/W or both.
 *
- * First pass is to walk the SG list and determine where the breaks are
- * in the DMA stream. Allocates PDIR entries but does not fill them.
- * Returns the number of DMA chunks.
+ * Walk the SG list and determine where the breaks are in the DMA stream.
+ * Allocate IO Pdir resources and fill them in separate loop.
+ * Returns the number of DMA streams used for output IOVA list.
+ * Note each DMA stream can consume multiple IO Pdir entries.
 *
- * Doing the fill seperate from the coalescing/allocation keeps the
- * code simpler. Future enhancement could make one pass through
- * the sglist do both.
+ * Code is written assuming some coalescing is possible.
 */
 static SBA_INLINE int
-sba_coalesce_chunks( struct ioc *ioc, struct scatterlist *startsg, int nents)
+sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents, int direction)
 {
-	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
-	unsigned long vcontig_len;         /* len of VCONTIG chunk */
-	unsigned long vcontig_end;
-	struct scatterlist *dma_sg;        /* next DMA stream head */
-	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+	struct scatterlist *dma_sg = startsg;	/* return array */
	int n_mappings = 0;
-	while (nents > 0) {
-		unsigned long vaddr = (unsigned long) (startsg->address);
+	ASSERT(nents > 1);
+	do {
+		unsigned int dma_cnt = 1;	/* number of pages in DMA stream */
+		unsigned int pide;		/* index into IO Pdir array */
+		u64 *pdirp;			/* pointer into IO Pdir array */
+		unsigned long dma_offset, dma_len;	/* cumulative DMA stream */
		/*
		** Prepare for first/next DMA stream
		*/
-		dma_sg = vcontig_sg = startsg;
-		dma_len = vcontig_len = vcontig_end = sba_sg_len(startsg);
-		vcontig_end += vaddr;
-		dma_offset = vaddr & ~IOVP_MASK;
-		/* PARANOID: clear entries */
-		sba_sg_buffer(startsg) = sba_sg_iova(startsg);
-		sba_sg_iova(startsg) = 0;
-		sba_sg_len(startsg) = 0;
+		dma_len = sba_sg_len(startsg);
+		dma_offset = sba_sg_address(startsg);
+		startsg++;
+		nents--;
		/*
-		** This loop terminates one iteration "early" since
-		** it's always looking one "ahead".
+		** We want to know how many entries can be coalesced
+		** before trying to allocate IO Pdir space.
+		** IOVAs can then be allocated "naturally" aligned
+		** to take advantage of the block IO TLB flush.
		*/
-		while (--nents > 0) {
-			unsigned long vaddr;	/* tmp */
-			startsg++;
+		while (nents) {
+			unsigned int end_offset = dma_offset + dma_len;
			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);
+			/* prev entry must end on a page boundary */
+			if (end_offset & IOVP_MASK)
+				break;
-			/*
-			** First make sure current dma stream won't
-			** exceed DMA_CHUNK_SIZE if we coalesce the
-			** next entry.
-			*/
-			if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE)
+			/* next entry start on a page boundary? */
+			if (startsg->offset)
				break;
			/*
-			** Then look for virtually contiguous blocks.
+			** append the next transaction?
+			** make sure current dma stream won't exceed
+			** DMA_CHUNK_SIZE if coalescing entries.
			*/
-			vaddr = (unsigned long) sba_sg_iova(startsg);
-			if (vcontig_end == vaddr) {
-				vcontig_len += sba_sg_len(startsg);
-				vcontig_end += sba_sg_len(startsg);
-				dma_len     += sba_sg_len(startsg);
-				sba_sg_buffer(startsg) = (char *)vaddr;
-				sba_sg_iova(startsg) = 0;
-				sba_sg_len(startsg) = 0;
-				continue;
-			}
+			if (((end_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE)
+				break;
-#ifdef DEBUG_LARGE_SG_ENTRIES
-			dump_run_sg = (vcontig_len > IOVP_SIZE);
-#endif
+			dma_len += sba_sg_len(startsg);
+			startsg++;
+			nents--;
+			dma_cnt++;
		}
-			/*
-			** Not virtually contigous.
-			** Terminate prev chunk.
-			** Start a new chunk.
-			**
-			** Once we start a new VCONTIG chunk, dma_offset
-			** can't change. And we need the offset from the first
-			** chunk - not the last one. Ergo Successive chunks
-			** must start on page boundaries and dove tail
-			** with it's predecessor.
-			*/
-			sba_sg_len(vcontig_sg) = vcontig_len;
+		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-			vcontig_sg = startsg;
-			vcontig_len = sba_sg_len(startsg);
+		/* allocate IO Pdir resource.
+		** returns index into (u64) IO Pdir array.
+		** IOVA is formed from this.
+		*/
+		pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT);
+		pdirp = &(ioc->pdir_base[pide]);
-			/*
-			** 3) do the entries end/start on page boundaries?
-			** Don't update vcontig_end until we've checked.
-			*/
-			if (DMA_CONTIG(vcontig_end, vaddr)) {
-				vcontig_end = vcontig_len + vaddr;
-				dma_len += vcontig_len;
-				sba_sg_buffer(startsg) = (char *)vaddr;
-				sba_sg_iova(startsg) = 0;
-				continue;
-			} else {
-				break;
-			}
-		}
+		/* fill_pdir: write stream into IO Pdir */
+		while (dma_cnt--) {
+			sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg));
+			startsg++;
+			pdirp++;
+		}
-		/*
-		** End of DMA Stream
-		** Terminate last VCONTIG block.
-		** Allocate space for DMA stream.
-		*/
-		sba_sg_len(vcontig_sg) = vcontig_len;
-		dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
-		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		sba_sg_iova(dma_sg) = (char *) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
-			| dma_offset);
+		/* "output" IOVA */
+		sba_sg_iova(dma_sg) = SBA_IOVA(ioc, ((dma_addr_t) pide << IOVP_SHIFT),
+					       dma_offset, DEFAULT_DMA_HINT_REG(direction));
+		sba_sg_iova_len(dma_sg) = dma_len;
+		dma_sg++;
		n_mappings++;
-	}
+	} while (nents);
	return n_mappings;
 }
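The rewritten sba_coalesce_chunks() merges scatterlist entries into one DMA stream only while the running stream ends on an IO-page boundary and the next entry starts at page offset 0, then caps the stream at DMA_CHUNK_SIZE. A hedged, self-contained sketch of that boundary test; the struct and names below are illustrative stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define IO_PAGE_SIZE 4096UL	/* stands in for IOVP_SIZE */

struct sg_ent { unsigned long offset; unsigned long length; };

/* Count how many leading entries can share one DMA stream. */
static size_t coalesce_count(const struct sg_ent *sg, size_t nents, unsigned long chunk_max)
{
	unsigned long dma_offset = sg[0].offset;
	unsigned long dma_len = sg[0].length;
	size_t n = 1;

	while (n < nents) {
		if ((dma_offset + dma_len) % IO_PAGE_SIZE)	/* stream must end on a boundary */
			break;
		if (sg[n].offset)				/* next entry must start on one */
			break;
		if (dma_offset + dma_len + sg[n].length > chunk_max)	/* respect the chunk limit */
			break;
		dma_len += sg[n].length;
		n++;
	}
	return n;
}

int main(void)
{
	struct sg_ent list[3] = { { 0, 4096 }, { 0, 8192 }, { 512, 100 } };
	printf("coalesced %zu of 3 entries\n", coalesce_count(list, 3, 64 * IO_PAGE_SIZE));
	return 0;
}

With the example data only the first two entries coalesce; the third starts mid-page, which is exactly the case where the driver begins a new stream.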
@@ -1223,7 +1074,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 /**
 * sba_map_sg - map Scatter/Gather list
- * @dev: instance of PCI owned by the driver that's asking.
+ * @dev: instance of PCI device owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
@@ -1234,42 +1085,46 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	       int direction)
 {
	struct ioc *ioc;
-	int coalesced, filled = 0;
+	int filled = 0;
	unsigned long flags;
 #ifdef ALLOW_IOV_BYPASS
	struct scatterlist *sg;
 #endif
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents,
+		   sba_sg_address(sglist), sba_sg_len(sglist));
	ioc = GET_IOC(dev);
	ASSERT(ioc);
 #ifdef ALLOW_IOV_BYPASS
	if (dev->dma_mask >= ioc->dma_mask) {
		for (sg = sglist ; filled < nents ; filled++, sg++) {
-			sba_sg_buffer(sg) = sba_sg_iova(sg);
-			sba_sg_iova(sg) = (char *) virt_to_phys(sba_sg_buffer(sg));
+			sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg));
+			sba_sg_iova_len(sg) = sba_sg_len(sg);
		}
 #ifdef CONFIG_PROC_FS
		spin_lock_irqsave(&ioc->res_lock, flags);
		ioc->msg_bypass++;
		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
		DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled);
		return filled;
	}
 #endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
-		sba_sg_buffer(sglist) = sba_sg_iova(sglist);
-		sba_sg_iova(sglist) = (char *)sba_map_single(dev,
-						sba_sg_buffer(sglist),
-						sba_sg_len(sglist), direction);
+		sba_sg_iova(sglist) = sba_map_single(dev, sba_sg_address(sglist),
+						sba_sg_len(sglist), direction);
+		sba_sg_iova_len(sglist) = sba_sg_len(sglist);
 #ifdef CONFIG_PROC_FS
		/*
		** Should probably do some stats counting, but trying to
		** be precise quickly starts wasting CPU time.
		*/
 #endif
		DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__);
		return 1;
	}
@@ -1286,26 +1141,11 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 #ifdef CONFIG_PROC_FS
	ioc->msg_calls++;
 #endif
	/*
-	** First coalesce the chunks and allocate I/O pdir space
-	**
-	** If this is one DMA stream, we can properly map using the
-	** correct virtual address associated with each DMA page.
-	** w/o this association, we wouldn't have coherent DMA!
-	** Access to the virtual address is what forces a two pass algorithm.
-	*/
-	coalesced = sba_coalesce_chunks(ioc, sglist, nents);
-
-	/*
-	** Program the I/O Pdir
-	**
-	** map the virtual addresses to the I/O Pdir
-	** o dma_address will contain the pdir index
-	** o dma_len will contain the number of bytes to map
-	** o address contains the virtual address.
+	** coalesce and program the I/O Pdir
	*/
-	filled = sba_fill_pdir(ioc, sglist, nents);
+	filled = sba_coalesce_chunks(ioc, sglist, nents, direction);
 #ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
@@ -1317,7 +1157,6 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	spin_unlock_irqrestore(&ioc->res_lock, flags);
-	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
	return filled;
@@ -1341,8 +1180,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	unsigned long flags;
 #endif
-	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		   __FUNCTION__, nents, sba_sg_buffer(sglist), sglist->length);
+	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n",
+		   __FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist));
	ioc = GET_IOC(dev);
	ASSERT(ioc);
@@ -1360,7 +1199,7 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	while (sba_sg_len(sglist) && nents--) {
		sba_unmap_single(dev, (dma_addr_t) sba_sg_iova(sglist),
-				 sba_sg_len(sglist), direction);
+				 sba_sg_iova_len(sglist), direction);
 #ifdef CONFIG_PROC_FS
		/*
		** This leaves inconsistent data in the stats, but we can't
@@ -1368,7 +1207,7 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
		** were coalesced to a single entry.  The stats are fun,
		** but speed is more important.
		*/
-		ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
+		ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT;
 #endif
		++sglist;
	}
@@ -1429,12 +1268,12 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
		__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT, ioc->pdir_size);
-	/* FIXME : DMA HINTs not used */
+	/* XXX DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
	ioc->pdir_base = pdir_base =
		(void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
	if (NULL == pdir_base) {
		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
@@ -1452,20 +1291,8 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
-	iova_space_mask <<= (iov_order + PAGE_SHIFT);
+	iova_space_mask <<= (iov_order + IOVP_SHIFT);
-#ifdef CONFIG_IA64_HP_PROTO
-	/*
-	** REVISIT - this is a kludge, but we won't be supporting anything but
-	** zx1 2.0 or greater for real.  When fw is in shape, ibase will
-	** be preprogrammed w/ the IOVA hole base and imask will give us
-	** the size.
-	*/
-	if ((sba_dev->hw_rev & 0xFF) < 0x20) {
-		DBG_INIT("%s() Found SBA rev < 2.0, setting IOVA base to 0.  This device will not be supported in the future.\n", __FUNCTION__);
-		ioc->ibase = 0x0;
-	} else
-#endif
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL;
	ioc->imask = iova_space_mask;	/* save it */
@@ -1474,7 +1301,7 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
		__FUNCTION__, ioc->ibase, ioc->imask);
	/*
-	** FIXME: Hint registers are programmed with default hint
+	** XXX DMA HINT registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/
@@ -1487,8 +1314,8 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
-	/* Set I/O PDIR Page size to system page size */
-	switch (PAGE_SHIFT) {
+	/* Set I/O Pdir page size to system page size */
+	switch (IOVP_SHIFT) {
		case 12: /* 4K */
			tcnfg = 0;
			break;
@@ -1636,7 +1463,7 @@ sba_common_init(struct sba_device *sba_dev)
			res_word = (int)(index / BITS_PER_LONG);
			mask = 0x1UL << (index - (res_word * BITS_PER_LONG));
			res_ptr[res_word] |= mask;
-			sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (0x80000000000000FFULL | reserved_iov);
+			sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov);
		}
	}
arch/ia64/hp/sim/hpsim_console.c
@@ -30,12 +30,12 @@ static void simcons_write (struct console *, const char *, unsigned);
 static kdev_t simcons_console_device (struct console *);
 struct console hpsim_cons = {
-	name:		"simcons",
-	write:		simcons_write,
-	device:		simcons_console_device,
-	setup:		simcons_init,
-	flags:		CON_PRINTBUFFER,
-	index:		-1,
+	.name =		"simcons",
+	.write =	simcons_write,
+	.device =	simcons_console_device,
+	.setup =	simcons_init,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
 };
 static int
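Several files in this merge make the same mechanical change: the GCC-specific "field:" initializer syntax is replaced by C99 designated initializers. A minimal illustration of the two spellings, using a made-up struct rather than the kernel's struct console:

#include <stdio.h>

struct console_ops {
	const char *name;
	int index;
};

/* Old GNU-extension spelling, as removed by this merge: */
static struct console_ops old_style = {
	name:	"simcons",
	index:	-1,
};

/* C99 designated initializers, as added: */
static struct console_ops new_style = {
	.name =		"simcons",
	.index =	-1,
};

int main(void)
{
	printf("%s %d / %s %d\n", old_style.name, old_style.index,
	       new_style.name, new_style.index);
	return 0;
}

Both forms initialize the same fields; the C99 form is standard and is what later kernels require.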
arch/ia64/hp/sim/hpsim_irq.c
@@ -22,14 +22,14 @@ hpsim_irq_noop (unsigned int irq)
 }
 static struct hw_interrupt_type irq_type_hp_sim = {
-	typename:	"hpsim",
-	startup:	hpsim_irq_startup,
-	shutdown:	hpsim_irq_noop,
-	enable:		hpsim_irq_noop,
-	disable:	hpsim_irq_noop,
-	ack:		hpsim_irq_noop,
-	end:		hpsim_irq_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
+	.typename =	"hpsim",
+	.startup =	hpsim_irq_startup,
+	.shutdown =	hpsim_irq_noop,
+	.enable =	hpsim_irq_noop,
+	.disable =	hpsim_irq_noop,
+	.ack =		hpsim_irq_noop,
+	.end =		hpsim_irq_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
 };
 void __init
arch/ia64/hp/sim/simserial.c
@@ -31,6 +31,7 @@
 #include <linux/serialP.h>
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
+#include <asm/uaccess.h>
 #ifdef CONFIG_KDB
arch/ia64/hp/zx1/hpzx1_machvec.c
-#define MACHVEC_PLATFORM_NAME	hpzx1
-#include <asm/machvec_init.h>
 #define MACHVEC_PLATFORM_NAME	hpzx1
 #include <asm/machvec_init.h>
arch/ia64/ia32/binfmt_elf32.c
@@ -67,7 +67,7 @@ ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int
 }
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	nopage:	ia32_install_shared_page
+	.nopage = ia32_install_shared_page
 };
 void
arch/ia64/kernel/acpi.c
@@ -56,6 +56,8 @@ asm (".weak iosapic_version");
 void (*pm_idle) (void);
 void (*pm_power_off) (void);
+unsigned char acpi_kbd_controller_present = 1;
 const char *
 acpi_get_sysname (void)
 {
@@ -206,7 +208,7 @@ struct acpi_table_madt * acpi_madt __initdata;
 static int __init
 acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_addr_ovr *lapic = NULL;
+	struct acpi_table_lapic_addr_ovr *lapic;
	lapic = (struct acpi_table_lapic_addr_ovr *) header;
	if (!lapic)
@@ -226,7 +228,7 @@ acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lsapic (acpi_table_entry_header *header)
 {
-	struct acpi_table_lsapic *lsapic = NULL;
+	struct acpi_table_lsapic *lsapic;
	lsapic = (struct acpi_table_lsapic *) header;
	if (!lsapic)
@@ -262,7 +264,7 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 {
-	struct acpi_table_lapic_nmi *lacpi_nmi = NULL;
+	struct acpi_table_lapic_nmi *lacpi_nmi;
	lacpi_nmi = (struct acpi_table_lapic_nmi *) header;
	if (!lacpi_nmi)
@@ -279,7 +281,7 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header)
 static int __init
 acpi_find_iosapic (int global_vector, u32 *irq_base, char **iosapic_address)
 {
-	struct acpi_table_iosapic *iosapic = NULL;
+	struct acpi_table_iosapic *iosapic;
	int ver = 0;
	int max_pin = 0;
	char *p = 0;
@@ -338,7 +340,7 @@ acpi_parse_iosapic (acpi_table_entry_header *header)
 static int __init
 acpi_parse_plat_int_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_plat_int_src *plintsrc = NULL;
+	struct acpi_table_plat_int_src *plintsrc;
	int vector = 0;
	u32 irq_base = 0;
	char *iosapic_address = NULL;
@@ -381,7 +383,7 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
 static int __init
 acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 {
-	struct acpi_table_int_src_ovr *p = NULL;
+	struct acpi_table_int_src_ovr *p;
	p = (struct acpi_table_int_src_ovr *) header;
	if (!p)
@@ -404,7 +406,7 @@ acpi_parse_int_src_ovr (acpi_table_entry_header *header)
 static int __init
 acpi_parse_nmi_src (acpi_table_entry_header *header)
 {
-	struct acpi_table_nmi_src *nmi_src = NULL;
+	struct acpi_table_nmi_src *nmi_src;
	nmi_src = (struct acpi_table_nmi_src *) header;
	if (!nmi_src)
@@ -425,10 +427,6 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
		return -EINVAL;
	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
-	if (!acpi_madt) {
-		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-		return -ENODEV;
-	}
	/* Get base address of IPI Message Block */
@@ -442,6 +440,28 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 }
+static int __init
+acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+{
+	struct acpi_table_header *fadt_header;
+	fadt_descriptor_rev2 *fadt;
+
+	if (!phys_addr || !size)
+		return -EINVAL;
+
+	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+
+	if (fadt_header->revision != 3)
+		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */
+
+	fadt = (fadt_descriptor_rev2 *) fadt_header;
+
+	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
+		acpi_kbd_controller_present = 0;
+
+	return 0;
+}
+
 int __init
 acpi_find_rsdp (unsigned long *rsdp_phys)
 {
@@ -467,8 +487,8 @@ acpi_find_rsdp (unsigned long *rsdp_phys)
 static int __init
 acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 {
-	acpi_ser_t *spcr = NULL;
-	unsigned long global_int = 0;
+	acpi_ser_t *spcr;
+	unsigned long global_int;
	if (!phys_addr || !size)
		return -EINVAL;
@@ -486,11 +506,6 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
	*/
	spcr = (acpi_ser_t *) __va(phys_addr);
-	if (!spcr) {
-		printk(KERN_WARNING PREFIX "Unable to map SPCR\n");
-		return -ENODEV;
-	}
	setup_serial_acpi(spcr);
	if (spcr->length < sizeof(acpi_ser_t))
@@ -527,11 +542,11 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 int __init
 acpi_boot_init (char *cmdline)
 {
-	int result = 0;
+	int result;
	/* Initialize the ACPI boot-time table parser */
	result = acpi_table_init(cmdline);
-	if (0 != result)
+	if (result)
		return result;
	/*
@@ -542,57 +557,49 @@ acpi_boot_init (char *cmdline)
	 * information -- the successor to MPS tables.
	 */
-	result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-	if (1 > result)
-		return result;
+	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+		printk(KERN_ERR PREFIX "Can't find MADT\n");
+		goto skip_madt;
+	}
	/* Local APIC */
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
-	if (0 > result) {
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
-		return result;
-	}
-	result = acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries!\n");
-		return -ENODEV;
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic) < 1)
+		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
-	result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
-	if (0 > result) {
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-		return result;
-	}
	/* I/O APIC */
-	result = acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic);
-	if (1 > result) {
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries!\n");
-		return ((result == 0) ? -ENODEV : result);
-	}
+	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic) < 1)
		printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries\n");
	/* System-Level Interrupt Routing */
-	result = acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src);
-	if (0 > result) {
+	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src) < 0)
		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
-		return result;
-	}
-	result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
-	if (0 > result) {
+	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr) < 0)
		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
-		return result;
-	}
-	result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
-	if (0 > result) {
+	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-		return result;
-	}
+  skip_madt:
+	/* FADT says whether a legacy keyboard controller is present. */
+	if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
+		printk(KERN_ERR PREFIX "Can't find FADT\n");
@@ -602,7 +609,7 @@ acpi_boot_init (char *cmdline)
	 * serial ports, EC, SMBus, etc.
	 */
	acpi_table_parse(ACPI_SPCR, acpi_parse_spcr);
-#endif /*CONFIG_SERIAL_ACPI*/
+#endif
 #ifdef CONFIG_SMP
	if (available_cpus == 0) {
@@ -625,9 +632,9 @@ acpi_boot_init (char *cmdline)
 int __init
 acpi_get_prt (struct pci_vector_struct **vectors, int *count)
 {
-	struct pci_vector_struct *vector = NULL;
-	struct list_head *node = NULL;
-	struct acpi_prt_entry *entry = NULL;
+	struct pci_vector_struct *vector;
+	struct list_head *node;
+	struct acpi_prt_entry *entry;
	int i = 0;
	if (!vectors || !count)
arch/ia64/kernel/efi.c
@@ -125,9 +125,79 @@ efi_gettimeofday (struct timeval *tv)
	tv->tv_usec = tm.nanosecond / 1000;
 }
+static int
+is_available_memory (efi_memory_desc_t *md)
+{
+	if (!(md->attribute & EFI_MEMORY_WB))
+		return 0;
+
+	switch (md->type) {
+	      case EFI_LOADER_CODE:
+	      case EFI_LOADER_DATA:
+	      case EFI_BOOT_SERVICES_CODE:
+	      case EFI_BOOT_SERVICES_DATA:
+	      case EFI_CONVENTIONAL_MEMORY:
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Trim descriptor MD so its starts at address START_ADDR.  If the descriptor covers
+ * memory that is normally available to the kernel, issue a warning that some memory
+ * is being ignored.
+ */
+static void
+trim_bottom (efi_memory_desc_t *md, u64 start_addr)
+{
+	u64 num_skipped_pages;
+
+	if (md->phys_addr >= start_addr || !md->num_pages)
+		return;
+
+	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
+	if (num_skipped_pages > md->num_pages)
+		num_skipped_pages = md->num_pages;
+
+	if (is_available_memory(md))
+		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+		       "at 0x%lx\n", __FUNCTION__,
+		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
+		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
+	/*
+	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
+	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
+	 * zero, so the Right Thing will happen.
+	 */
+	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
+	md->num_pages -= num_skipped_pages;
+}
+
+static void
+trim_top (efi_memory_desc_t *md, u64 end_addr)
+{
+	u64 num_dropped_pages, md_end_addr;
+
+	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+	if (md_end_addr <= end_addr || !md->num_pages)
+		return;
+
+	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
+	if (num_dropped_pages > md->num_pages)
+		num_dropped_pages = md->num_pages;
+
+	if (is_available_memory(md))
+		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
+		       "at 0x%lx\n", __FUNCTION__,
+		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
+		       md->phys_addr, end_addr);
+	md->num_pages -= num_dropped_pages;
+}
+
 /*
- * Walks the EFI memory map and calls CALLBACK once for each EFI
- * memory descriptor that has memory that is available for OS use.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for OS use.
 */
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
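The new trim/search logic in efi.c keeps only memory that lies inside whole IA64 granules, rounding addresses up and down with the usual power-of-two tricks ((addr + size - 1) & -size to round up, addr & -size to round down). A quick stand-alone check of those expressions, with an assumed 16 MB granule:

#include <stdio.h>

#define GRANULE_SIZE (16UL << 20)	/* assumed IA64_GRANULE_SIZE of 16 MB */

int main(void)
{
	unsigned long addr = 0x4123456UL;
	unsigned long up   = (addr + GRANULE_SIZE - 1) & -GRANULE_SIZE;	/* round up */
	unsigned long down = addr & -GRANULE_SIZE;				/* round down */

	printf("addr=0x%lx  up=0x%lx  down=0x%lx\n", addr, up, down);
	return 0;
}

For the value above this prints up=0x5000000 and down=0x4000000, which is the same rounding efi_memmap_walk() applies to granule_addr and first_non_wb_addr.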
@@ -137,9 +207,9 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
		u64 start;
		u64 end;
	} prev, curr;
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size, start, end;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *check_md;
+	u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;
	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -147,24 +217,56 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
-		switch (md->type) {
-		      case EFI_LOADER_CODE:
-		      case EFI_LOADER_DATA:
-		      case EFI_BOOT_SERVICES_CODE:
-		      case EFI_BOOT_SERVICES_DATA:
-		      case EFI_CONVENTIONAL_MEMORY:
-			if (!(md->attribute & EFI_MEMORY_WB))
-				continue;
+		/* skip over non-WB memory descriptors; that's all we're interested in... */
+		if (!(md->attribute & EFI_MEMORY_WB))
+			continue;
+
+		if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
+			/*
+			 * Search for the next run of contiguous WB memory.  Start search
+			 * at first granule boundary covered by md.
+			 */
+			granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1)
+					& -IA64_GRANULE_SIZE);
+			first_non_wb_addr = granule_addr;
+			for (q = p; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+
+				if (check_md->attribute & EFI_MEMORY_WB)
+					trim_bottom(md, granule_addr);
+
+				if (check_md->phys_addr < granule_addr)
+					continue;
+
+				if (!(check_md->attribute & EFI_MEMORY_WB))
+					break;	/* hit a non-WB region; stop search */
+
+				if (check_md->phys_addr != first_non_wb_addr)
+					break;	/* hit a memory hole; stop search */
+
+				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
+			}
+			/* round it down to the previous granule-boundary: */
+			first_non_wb_addr &= -IA64_GRANULE_SIZE;
+
+			if (!(first_non_wb_addr > granule_addr))
+				continue;	/* couldn't find enough contiguous memory */
+		}
+
+		/* BUG_ON((md->phys_addr >> IA64_GRANULE_SHIFT) < first_non_wb_addr); */
+
+		trim_top(md, first_non_wb_addr);
+		if (is_available_memory(md)) {
			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
				if (md->phys_addr > mem_limit)
					continue;
				md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
			}
-			if (md->num_pages == 0) {
-				printk("efi_memmap_walk: ignoring empty region at 0x%lx",
-				       md->phys_addr);
-				continue;
-			}
+
+			if (md->num_pages == 0)
+				continue;
			curr.start = PAGE_OFFSET + md->phys_addr;
			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
@@ -187,10 +289,6 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
				prev = curr;
			}
		}
-		break;
-
-		      default:
-			continue;
-		}
	}
	if (prev_valid) {
@@ -268,8 +366,9 @@ efi_map_pal_code (void)
		 */
		psr = ia64_clear_ic();
		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);
+			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			 IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
		ia64_srlz_i();
	}
 }
@@ -376,7 +475,7 @@ efi_init (void)
		md = p;
		printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
		       i, md->type, md->attribute, md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
+		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
		       md->num_pages >> (20 - EFI_PAGE_SHIFT));
	}
 }
arch/ia64/kernel/init_task.c
@@ -34,8 +34,8 @@ union init_thread {
	} s;
	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
 } init_thread_union __attribute__((section(".data.init_task"))) = {{
-	task:		INIT_TASK(init_thread_union.s.task),
-	thread_info:	INIT_THREAD_INFO(init_thread_union.s.thread_info)
+	.task =		INIT_TASK(init_thread_union.s.task),
+	.thread_info =	INIT_THREAD_INFO(init_thread_union.s.thread_info)
 }};
 asm (".global init_task; init_task = init_thread_union");
arch/ia64/kernel/iosapic.c
@@ -88,7 +88,7 @@ static struct {
 static struct iosapic_irq {
	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
	char		pin;		/* IOSAPIC pin (-1 => not an IOSAPIC irq) */
	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
	unsigned char	polarity: 1;	/* interrupt polarity (see iosapic.h) */
@@ -97,9 +97,9 @@ static struct iosapic_irq {
 static struct iosapic {
	char		*addr;		/* base address of IOSAPIC */
-	unsigned char	pcat_compat;	/* 8259 compatibility flag */
-	unsigned char	base_irq;	/* first irq assigned to this IOSAPIC */
+	unsigned int	base_irq;	/* first irq assigned to this IOSAPIC */
	unsigned short	max_pin;	/* max input pin supported in this IOSAPIC */
+	unsigned char	pcat_compat;	/* 8259 compatibility flag */
 } iosapic_lists[256] __initdata;
 static int num_iosapic = 0;
@@ -322,14 +322,14 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 struct hw_interrupt_type irq_type_iosapic_level = {
-	typename:	"IO-SAPIC-level",
-	startup:	iosapic_startup_level_irq,
-	shutdown:	iosapic_shutdown_level_irq,
-	enable:		iosapic_enable_level_irq,
-	disable:	iosapic_disable_level_irq,
-	ack:		iosapic_ack_level_irq,
-	end:		iosapic_end_level_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-level",
+	.startup =	iosapic_startup_level_irq,
+	.shutdown =	iosapic_shutdown_level_irq,
+	.enable =	iosapic_enable_level_irq,
+	.disable =	iosapic_disable_level_irq,
+	.ack =		iosapic_ack_level_irq,
+	.end =		iosapic_end_level_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 /*
@@ -366,14 +366,14 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq		nop
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	typename:	"IO-SAPIC-edge",
-	startup:	iosapic_startup_edge_irq,
-	shutdown:	iosapic_disable_edge_irq,
-	enable:		iosapic_enable_edge_irq,
-	disable:	iosapic_disable_edge_irq,
-	ack:		iosapic_ack_edge_irq,
-	end:		iosapic_end_edge_irq,
-	set_affinity:	iosapic_set_affinity
+	.typename =	"IO-SAPIC-edge",
+	.startup =	iosapic_startup_edge_irq,
+	.shutdown =	iosapic_disable_edge_irq,
+	.enable =	iosapic_enable_edge_irq,
+	.disable =	iosapic_disable_edge_irq,
+	.ack =		iosapic_ack_edge_irq,
+	.end =		iosapic_end_edge_irq,
+	.set_affinity =	iosapic_set_affinity
 };
 unsigned int
@@ -679,11 +679,10 @@ iosapic_init_pci_irq (void)
			pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin,
			iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector);
 #endif
		/*
-		 * Forget not to program the IOSAPIC RTE per ACPI _PRT
+		 * NOTE: The IOSAPIC RTE will be programmed in iosapic_pci_fixup().  It
+		 * needs to be done there to ensure PCI hotplug works right.
		 */
-		set_rte(vector, (ia64_get_lid() >> 16) & 0xffff);
	}
 }
arch/ia64/kernel/irq_ia64.c
@@ -36,6 +36,10 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#ifdef CONFIG_PERFMON
+# include <asm/perfmon.h>
+#endif
+
 #define IRQ_DEBUG	0
 /* default base addr of IPI table */
@@ -144,9 +148,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
 static struct irqaction ipi_irqaction = {
-	handler:	handle_IPI,
-	flags:		SA_INTERRUPT,
-	name:		"IPI"
+	.handler =	handle_IPI,
+	.flags =	SA_INTERRUPT,
+	.name =		"IPI"
 };
 #endif
@@ -172,6 +176,9 @@ init_IRQ (void)
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 #endif
+#ifdef CONFIG_PERFMON
+	perfmon_init_percpu();
+#endif
	platform_irq_init();
 }
arch/ia64/kernel/irq_lsapic.c
@@ -27,12 +27,12 @@ lsapic_noop (unsigned int irq)
 }
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	typename:	"LSAPIC",
-	startup:	lsapic_noop_startup,
-	shutdown:	lsapic_noop,
-	enable:		lsapic_noop,
-	disable:	lsapic_noop,
-	ack:		lsapic_noop,
-	end:		lsapic_noop,
-	set_affinity:	(void (*)(unsigned int, unsigned long)) lsapic_noop
+	.typename =	"LSAPIC",
+	.startup =	lsapic_noop_startup,
+	.shutdown =	lsapic_noop,
+	.enable =	lsapic_noop,
+	.disable =	lsapic_noop,
+	.ack =		lsapic_noop,
+	.end =		lsapic_noop,
+	.set_affinity =	(void (*)(unsigned int, unsigned long)) lsapic_noop
 };
arch/ia64/kernel/machvec.c
View file @
8beb1642
...
...
@@ -11,13 +11,16 @@
 struct ia64_machine_vector ia64_mv;

 /*
- * Most platforms use this routine for mapping page frame addresses
- * into a memory map index.
+ * Most platforms use this routine for mapping page frame addresses into a memory map
+ * index.
+ *
+ * Note: we can't use __pa() because map_nr_dense(X) MUST map to something >= max_mapnr if
+ * X is outside the identity mapped kernel space.
  */
 unsigned long
 map_nr_dense (unsigned long addr)
 {
-	return MAP_NR_DENSE(addr);
+	return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
 }
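For illustration, the new map_nr_dense() turns a kernel virtual address directly into a mem_map index. A small user-space sketch of the same arithmetic, with stand-in constants (the real PAGE_OFFSET/PAGE_SHIFT come from the kernel headers):

	#include <stdio.h>

	#define PAGE_OFFSET	0xe000000000000000UL
	#define PAGE_SHIFT	14			/* 16KB pages, a common ia64 configuration */

	static unsigned long map_nr_dense(unsigned long addr)
	{
		return (addr - PAGE_OFFSET) >> PAGE_SHIFT;
	}

	int main(void)
	{
		unsigned long addr = PAGE_OFFSET + (123UL << PAGE_SHIFT) + 0x100;
		printf("page frame index = %lu\n", map_nr_dense(addr));	/* prints 123 */
		return 0;
	}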
 static struct ia64_machine_vector *
...
...
arch/ia64/kernel/mca.c
View file @
8beb1642
...
...
@@ -82,27 +82,27 @@ extern void ia64_slave_init_handler (void);
 extern struct hw_interrupt_type irq_type_iosapic_level;

 static struct irqaction cmci_irqaction = {
-	handler:	ia64_mca_cmc_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cmc_hndlr"
+	.handler =	ia64_mca_cmc_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cmc_hndlr"
 };

 static struct irqaction mca_rdzv_irqaction = {
-	handler:	ia64_mca_rendez_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_rdzv"
+	.handler =	ia64_mca_rendez_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_rdzv"
 };

 static struct irqaction mca_wkup_irqaction = {
-	handler:	ia64_mca_wakeup_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"mca_wkup"
+	.handler =	ia64_mca_wakeup_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"mca_wkup"
 };

 static struct irqaction mca_cpe_irqaction = {
-	handler:	ia64_mca_cpe_int_handler,
-	flags:		SA_INTERRUPT,
-	name:		"cpe_hndlr"
+	.handler =	ia64_mca_cpe_int_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"cpe_hndlr"
 };
/*
...
...
arch/ia64/kernel/mca_asm.S
View file @
8beb1642
...
...
@@ -684,9 +684,9 @@ ia64_os_mca_tlb_error_check:
 	movl	r3=SAL_GET_STATE_INFO;;
 	DATA_VA_TO_PA(r7);;			// convert to physical address
 	ld8	r8=[r7],8;;			// get pdesc function pointer
-	DATA_VA_TO_PA(r8)			// convert to physical address
+	dep	r8=0,r8,61,3;;			// convert SAL VA to PA
 	ld8	r1=[r7];;			// set new (ia64_sal) gp
-	DATA_VA_TO_PA(r1)			// convert to physical address
+	dep	r1=0,r1,61,3;;			// convert SAL VA to PA
 	mov	b6=r8
 	alloc	r5=ar.pfs,8,0,8,0;;		// allocate stack frame for SAL call
...
...
arch/ia64/kernel/pci.c
View file @
8beb1642
...
...
@@ -265,12 +265,37 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
 int
 pcibios_enable_device (struct pci_dev *dev)
 {
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
 	if (!dev)
 		return -EINVAL;

-	/* Not needed, since we enable all devices at startup.  */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx = 0; idx < 6; idx++) {
+		r = &dev->resource[idx];
+		if (!r->start && r->end) {
+			printk(KERN_ERR
+			       "PCI: Device %s not available because of resource collisions\n",
+			       dev->slot_name);
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+	if (dev->resource[PCI_ROM_RESOURCE].start)
+		cmd |= PCI_COMMAND_MEMORY;
+	if (cmd != old_cmd) {
+		printk("PCI: Enabling device %s (%04x -> %04x)\n", dev->slot_name, old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+
+	printk(KERN_INFO "PCI: Found IRQ %d for device %s\n", dev->irq, dev->slot_name);
+
 	return 0;
 }
...
...
arch/ia64/kernel/perfmon.c
View file @
8beb1642
...
...
@@ -106,6 +106,12 @@
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) (i==0)
#endif
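For illustration, cpu_online_map is a bitmask with one bit per online CPU, so cpu_is_online(i) is just a bit test. A tiny sketch of that test (cpu_online_map here is a local stand-in for the kernel's global):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cpu_online_map = 0xbUL;	/* CPUs 0, 1 and 3 online */
		int i;

		for (i = 0; i < 4; i++)
			printf("cpu%d online: %s\n", i,
			       (cpu_online_map & (1UL << i)) ? "yes" : "no");
		return 0;
	}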
/*
* debugging
*/
...
...
@@ -277,8 +283,8 @@ typedef struct {
 typedef struct {
 	pfm_pmu_reg_type_t	type;
 	int			pm_pos;
-	int			(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
-	int			(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val);
+	int			(*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+	int			(*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
 	unsigned long		dep_pmd[4];
 	unsigned long		dep_pmc[4];
 } pfm_reg_desc_t;
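For illustration, perfmon keeps one descriptor per PMU register and dispatches optional read/write checkers through these function pointers (the "if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(...)" pattern in the hunks below). A minimal stand-alone sketch of that table-plus-optional-callback pattern; the struct layout and names are illustrative, not perfmon's own:

	#include <stdio.h>

	struct reg_desc {
		const char *name;
		int (*write_check)(unsigned int cnum, unsigned long *val);	/* NULL = no checker */
	};

	static int force_bit23(unsigned int cnum, unsigned long *val)
	{
		*val |= 1UL << 23;	/* e.g. force an "enable" bit before the write */
		return 0;
	}

	static struct reg_desc desc[] = {
		{ "pmc4", force_bit23 },
		{ "pmc5", NULL },
	};

	static int write_reg(unsigned int cnum, unsigned long val)
	{
		/* run the checker only if one is registered */
		if (desc[cnum].write_check && desc[cnum].write_check(cnum, &val))
			return -1;
		printf("%s <- 0x%lx\n", desc[cnum].name, val);
		return 0;
	}

	int main(void)
	{
		write_reg(0, 0x6);	/* checker sets bit 23 first */
		write_reg(1, 0x6);	/* written unchanged */
		return 0;
	}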
...
...
@@ -396,7 +402,7 @@ static unsigned long reset_pmcs[IA64_NUM_PMC_REGS];	/* contains PAL reset values
 static void pfm_vm_close(struct vm_area_struct * area);

 static struct vm_operations_struct pfm_vm_ops = {
-	close:		pfm_vm_close
+	.close =	pfm_vm_close
 };
/*
...
...
@@ -902,8 +908,8 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 		/*
 		 * and it must be a valid CPU
 		 */
-		cpu = ffs(pfx->ctx_cpu_mask);
-		if (!cpu_online(cpu)) {
+		cpu = ffz(~pfx->ctx_cpu_mask);
+		if (cpu_is_online(cpu) == 0) {
 			DBprintk(("CPU%d is not online\n", cpu));
 			return -EINVAL;
 		}
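For illustration, the switch from ffs(mask) to ffz(~mask) matters because ffs() is 1-based while ffz(~mask) yields the 0-based index of the first set bit, which is what a CPU number needs to be. A small user-space sketch (my_ffz is a stand-in for the kernel helper):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static int my_ffz(unsigned long x)	/* index of the first zero bit */
	{
		int i;
		for (i = 0; i < 64; i++)
			if (!((x >> i) & 1))
				return i;
		return -1;
	}

	int main(void)
	{
		unsigned long cpu_mask = 0x4;	/* only CPU 2 allowed */

		printf("ffs(mask)  = %d\n", ffs(cpu_mask));		/* 3: 1-based, wrong CPU number */
		printf("ffz(~mask) = %d\n", my_ffz(~cpu_mask));	/* 2: 0-based, the intended CPU */
		return 0;
	}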
...
...
@@ -925,11 +931,12 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#if 0
 		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
 			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
 			return -EINVAL;
 		}
+#endif
 	}
 	/* probably more to add here */
...
...
@@ -968,7 +975,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
 	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {

 		/* at this point, we know there is at least one bit set */
-		cpu = ffs(tmp.ctx_cpu_mask) - 1;
+		cpu = ffz(~tmp.ctx_cpu_mask);

 		DBprintk(("requesting CPU%d currently on CPU%d\n", cpu, smp_processor_id()));
...
...
@@ -1280,7 +1287,7 @@ pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/*
 		 * execute write checker, if any
 		 */
-		if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value);
+		if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);

 abort_mission:
 		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...
...
@@ -1371,7 +1378,7 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/*
 		 * execute write checker, if any
 		 */
-		if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value);
+		if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);

 abort_mission:
 		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;
...
...
@@ -1394,6 +1401,8 @@ pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int coun
 		/* keep track of what we use */
 		CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);
+		/* mark this register as used as well */
+		CTX_USED_PMD(ctx, RDEP(cnum));

 		/* writes to unimplemented part is ignored, so this is safe */
 		ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val);
...
...
@@ -1438,7 +1447,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 	DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));

 	for (i = 0; i < count; i++, req++) {
-		unsigned long reg_val = ~0UL, ctx_val = ~0UL;
+		unsigned long ctx_val = ~0UL;

 		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
...
...
@@ -1462,7 +1471,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		 */
 		if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){
 			ia64_srlz_d();
-			val = reg_val = ia64_get_pmd(cnum);
+			val = ia64_get_pmd(cnum);
 			DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
 		} else {
 #ifdef CONFIG_SMP
...
...
@@ -1484,7 +1493,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 			}
 #endif
 			/* context has been saved */
-			val = reg_val = th->pmd[cnum];
+			val = th->pmd[cnum];
 		}

 		if (PMD_IS_COUNTING(cnum)) {
 			/*
...
...
@@ -1493,9 +1502,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
val
&=
pmu_conf
.
perf_ovfl_val
;
val
+=
ctx_val
=
ctx
->
ctx_soft_pmds
[
cnum
].
val
;
}
else
{
val
=
reg_val
=
ia64_get_pmd
(
cnum
);
}
}
tmp
.
reg_value
=
val
;
...
...
@@ -1503,14 +1510,13 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
* execute read checker, if any
*/
if
(
PMD_RD_FUNC
(
cnum
))
{
ret
=
PMD_RD_FUNC
(
cnum
)(
task
,
cnum
,
&
tmp
.
reg_value
);
ret
=
PMD_RD_FUNC
(
cnum
)(
task
,
cnum
,
&
tmp
.
reg_value
,
regs
);
}
PFM_REG_RETFLAG_SET
(
tmp
.
reg_flags
,
ret
);
DBprintk
((
"read pmd[%u] ret=%d soft_pmd=0x%lx reg=0x%lx pmc=0x%lx
\n
"
,
cnum
,
ret
,
ctx_val
,
reg_val
,
ia64_get_pmc
(
cnum
)));
DBprintk
((
"read pmd[%u] ret=%d value=0x%lx pmc=0x%lx
\n
"
,
cnum
,
ret
,
val
,
ia64_get_pmc
(
cnum
)));
if
(
copy_to_user
(
req
,
&
tmp
,
sizeof
(
tmp
)))
return
-
EFAULT
;
}
...
...
@@ -1553,15 +1559,11 @@ pfm_use_debug_registers(struct task_struct *task)
*/
if
(
ctx
&&
ctx
->
ctx_fl_using_dbreg
==
1
)
return
-
1
;
/*
* XXX: not pretty
*/
LOCK_PFS
();
/*
* We only allow the use of debug registers when there is no system
* wide monitoring
* XXX: we could relax this by
* We cannot allow setting breakpoints when system wide monitoring
* sessions are using the debug registers.
*/
if
(
pfm_sessions
.
pfs_sys_use_dbregs
>
0
)
ret
=
-
1
;
...
...
@@ -1921,7 +1923,6 @@ typedef union {
dbr_mask_reg_t
dbr
;
}
dbreg_t
;
static
int
pfm_write_ibr_dbr
(
int
mode
,
struct
task_struct
*
task
,
void
*
arg
,
int
count
,
struct
pt_regs
*
regs
)
{
...
...
@@ -1963,8 +1964,8 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
if
(
ctx
->
ctx_fl_system
)
{
/* we mark ourselves as owner of the debug registers */
ctx
->
ctx_fl_using_dbreg
=
1
;
}
else
{
if
(
ctx
->
ctx_fl_using_dbreg
==
0
)
{
DBprintk
((
"system-wide setting fl_using_dbreg for [%d]
\n
"
,
task
->
pid
));
}
else
if
(
first_time
)
{
ret
=
-
EBUSY
;
if
((
thread
->
flags
&
IA64_THREAD_DBG_VALID
)
!=
0
)
{
DBprintk
((
"debug registers already in use for [%d]
\n
"
,
task
->
pid
));
...
...
@@ -1973,6 +1974,7 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
/* we mark ourselves as owner of the debug registers */
ctx
->
ctx_fl_using_dbreg
=
1
;
DBprintk
((
"setting fl_using_dbreg for [%d]
\n
"
,
task
->
pid
));
/*
* Given debug registers cannot be used for both debugging
* and performance monitoring at the same time, we reuse
...
...
@@ -1980,20 +1982,27 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
*/
memset
(
task
->
thread
.
dbr
,
0
,
sizeof
(
task
->
thread
.
dbr
));
memset
(
task
->
thread
.
ibr
,
0
,
sizeof
(
task
->
thread
.
ibr
));
}
/*
* clear hardware registers to make sure we don't
* pick up stale state
*/
for
(
i
=
0
;
i
<
pmu_conf
.
num_ibrs
;
i
++
)
{
ia64_set_ibr
(
i
,
0UL
);
}
ia64_srlz_i
();
for
(
i
=
0
;
i
<
pmu_conf
.
num_dbrs
;
i
++
)
{
ia64_set_dbr
(
i
,
0UL
);
}
ia64_srlz_d
();
if
(
first_time
)
{
DBprintk
((
"[%d] clearing ibrs,dbrs
\n
"
,
task
->
pid
));
/*
* clear hardware registers to make sure we don't
* pick up stale state.
*
* for a system wide session, we do not use
* thread.dbr, thread.ibr because this process
* never leaves the current CPU and the state
* is shared by all processes running on it
*/
for
(
i
=
0
;
i
<
pmu_conf
.
num_ibrs
;
i
++
)
{
ia64_set_ibr
(
i
,
0UL
);
}
ia64_srlz_i
();
for
(
i
=
0
;
i
<
pmu_conf
.
num_dbrs
;
i
++
)
{
ia64_set_dbr
(
i
,
0UL
);
}
ia64_srlz_d
();
}
ret
=
-
EFAULT
;
...
...
@@ -2361,9 +2370,9 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
{
struct
pt_regs
*
regs
=
(
struct
pt_regs
*
)
&
stack
;
struct
task_struct
*
task
=
current
;
pfm_context_t
*
ctx
=
task
->
thread
.
pfm_context
;
pfm_context_t
*
ctx
;
size_t
sz
;
int
ret
=
-
ESRCH
,
narg
;
int
ret
,
narg
;
/*
* reject any call if perfmon was disabled at initialization time
...
...
@@ -2393,6 +2402,8 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
if
(
pid
!=
current
->
pid
)
{
ret
=
-
ESRCH
;
read_lock
(
&
tasklist_lock
);
task
=
find_task_by_pid
(
pid
);
...
...
@@ -2407,10 +2418,11 @@ sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
ret
=
check_task_state
(
task
);
if
(
ret
!=
0
)
goto
abort_call
;
}
ctx
=
task
->
thread
.
pfm_context
;
}
}
ctx
=
task
->
thread
.
pfm_context
;
if
(
PFM_CMD_USE_CTX
(
cmd
))
{
ret
=
-
EINVAL
;
if
(
ctx
==
NULL
)
{
...
...
@@ -2953,11 +2965,6 @@ perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
static
int
perfmon_proc_info
(
char
*
page
)
{
#ifdef CONFIG_SMP
#define cpu_is_online(i) (cpu_online_map & (1UL << i))
#else
#define cpu_is_online(i) 1
#endif
char
*
p
=
page
;
int
i
;
...
...
@@ -4118,9 +4125,9 @@ pfm_cleanup_notifiers(struct task_struct *task)
 }

 static struct irqaction perfmon_irqaction = {
-	handler:	perfmon_interrupt,
-	flags:		SA_INTERRUPT,
-	name:		"perfmon"
+	.handler =	perfmon_interrupt,
+	.flags =	SA_INTERRUPT,
+	.name =		"perfmon"
 };
...
...
@@ -4150,11 +4157,6 @@ perfmon_init (void)
 	pal_perf_mon_info_u_t pm_info;
 	s64 status;

-	register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
-
-	ia64_set_pmv(IA64_PERFMON_VECTOR);
-	ia64_srlz_d();
-
 	pmu_conf.pfm_is_disabled = 1;

 	printk("perfmon: version %u.%u (sampling format v%u.%u) IRQ %u\n",
...
...
@@ -4232,6 +4234,9 @@ __initcall(perfmon_init);
 void
 perfmon_init_percpu (void)
 {
+	if (smp_processor_id() == 0)
+		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+
 	ia64_set_pmv(IA64_PERFMON_VECTOR);
 	ia64_srlz_d();
 }
...
...
arch/ia64/kernel/perfmon_itanium.h
0 → 100644
View file @
8beb1642
/*
* This file contains the Itanium PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_ITANIUM
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
static
int
pfm_ita_pmc_check
(
struct
task_struct
*
task
,
unsigned
int
cnum
,
unsigned
long
*
val
,
struct
pt_regs
*
regs
);
static
int
pfm_write_ibr_dbr
(
int
mode
,
struct
task_struct
*
task
,
void
*
arg
,
int
count
,
struct
pt_regs
*
regs
);
static
pfm_reg_desc_t
pmc_desc
[
256
]
=
{
/* pmc0 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc1 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc2 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc3 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc4 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
4
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc5 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
5
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc6 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
6
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc7 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
7
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc8 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc9 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc10 */
{
PFM_REG_MONITOR
,
6
,
NULL
,
NULL
,
{
RDEP
(
0
)
|
RDEP
(
1
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc11 */
{
PFM_REG_MONITOR
,
6
,
NULL
,
pfm_ita_pmc_check
,
{
RDEP
(
2
)
|
RDEP
(
3
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc12 */
{
PFM_REG_MONITOR
,
6
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc13 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
pfm_ita_pmc_check
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
{
PFM_REG_NONE
,
0
,
NULL
,
NULL
,
{
0
,},
{
0
,}},
/* end marker */
};
static
pfm_reg_desc_t
pmd_desc
[
256
]
=
{
/* pmd0 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
1
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
10
),
0UL
,
0UL
,
0UL
}},
/* pmd1 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
0
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
10
),
0UL
,
0UL
,
0UL
}},
/* pmd2 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
3
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
/* pmd3 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
2
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
/* pmd4 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
4
),
0UL
,
0UL
,
0UL
}},
/* pmd5 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
5
),
0UL
,
0UL
,
0UL
}},
/* pmd6 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
6
),
0UL
,
0UL
,
0UL
}},
/* pmd7 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
7
),
0UL
,
0UL
,
0UL
}},
/* pmd8 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd9 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd10 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd11 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd12 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd13 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd14 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd15 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd16 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd17 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
2
)
|
RDEP
(
3
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
{
PFM_REG_NONE
,
0
,
NULL
,
NULL
,
{
0
,},
{
0
,}},
/* end marker */
};
static
int
pfm_ita_pmc_check
(
struct
task_struct
*
task
,
unsigned
int
cnum
,
unsigned
long
*
val
,
struct
pt_regs
*
regs
)
{
pfm_context_t
*
ctx
=
task
->
thread
.
pfm_context
;
int
ret
;
/*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if
(
cnum
==
13
&&
((
*
val
&
0x1
)
==
0UL
)
&&
ctx
->
ctx_fl_using_dbreg
==
0
)
{
/* don't mix debug with perfmon */
if
((
task
->
thread
.
flags
&
IA64_THREAD_DBG_VALID
)
!=
0
)
return
-
EINVAL
;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret
=
pfm_write_ibr_dbr
(
1
,
task
,
NULL
,
0
,
regs
);
if
(
ret
)
return
ret
;
}
/*
* we must clear the (data) debug registers if pmc11.pt bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if
(
cnum
==
11
&&
((
*
val
>>
28
)
&
0x1
)
==
0
&&
ctx
->
ctx_fl_using_dbreg
==
0
)
{
/* don't mix debug with perfmon */
if
((
task
->
thread
.
flags
&
IA64_THREAD_DBG_VALID
)
!=
0
)
return
-
EINVAL
;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret
=
pfm_write_ibr_dbr
(
0
,
task
,
NULL
,
0
,
regs
);
if
(
ret
)
return
ret
;
}
return
0
;
}
arch/ia64/kernel/perfmon_mckinley.h
0 → 100644
View file @
8beb1642
/*
* This file contains the McKinley PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_MCKINLEY
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
static
int
pfm_mck_pmc_check
(
struct
task_struct
*
task
,
unsigned
int
cnum
,
unsigned
long
*
val
,
struct
pt_regs
*
regs
);
static
int
pfm_write_ibr_dbr
(
int
mode
,
struct
task_struct
*
task
,
void
*
arg
,
int
count
,
struct
pt_regs
*
regs
);
static
pfm_reg_desc_t
pmc_desc
[
256
]
=
{
/* pmc0 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc1 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc2 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc3 */
{
PFM_REG_CONTROL
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc4 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
pfm_mck_pmc_check
,
{
RDEP
(
4
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc5 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
5
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc6 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
6
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc7 */
{
PFM_REG_COUNTING
,
6
,
NULL
,
NULL
,
{
RDEP
(
7
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc8 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
pfm_mck_pmc_check
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc9 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc10 */
{
PFM_REG_MONITOR
,
4
,
NULL
,
NULL
,
{
RDEP
(
0
)
|
RDEP
(
1
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc11 */
{
PFM_REG_MONITOR
,
6
,
NULL
,
NULL
,
{
RDEP
(
2
)
|
RDEP
(
3
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc12 */
{
PFM_REG_MONITOR
,
6
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc13 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
pfm_mck_pmc_check
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc14 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
pfm_mck_pmc_check
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
/* pmc15 */
{
PFM_REG_CONFIG
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
0UL
,
0UL
,
0UL
,
0UL
}},
{
PFM_REG_NONE
,
0
,
NULL
,
NULL
,
{
0
,},
{
0
,}},
/* end marker */
};
static
pfm_reg_desc_t
pmd_desc
[
256
]
=
{
/* pmd0 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
1
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
10
),
0UL
,
0UL
,
0UL
}},
/* pmd1 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
0
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
10
),
0UL
,
0UL
,
0UL
}},
/* pmd2 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
3
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
/* pmd3 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
2
)
|
RDEP
(
17
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
/* pmd4 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
4
),
0UL
,
0UL
,
0UL
}},
/* pmd5 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
5
),
0UL
,
0UL
,
0UL
}},
/* pmd6 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
6
),
0UL
,
0UL
,
0UL
}},
/* pmd7 */
{
PFM_REG_COUNTING
,
0
,
NULL
,
NULL
,
{
0UL
,
0UL
,
0UL
,
0UL
},
{
RDEP
(
7
),
0UL
,
0UL
,
0UL
}},
/* pmd8 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd9 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd10 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd11 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd12 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd13 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
14
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd14 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
15
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd15 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
16
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd16 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
8
)
|
RDEP
(
9
)
|
RDEP
(
10
)
|
RDEP
(
11
)
|
RDEP
(
12
)
|
RDEP
(
13
)
|
RDEP
(
14
)
|
RDEP
(
15
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
12
),
0UL
,
0UL
,
0UL
}},
/* pmd17 */
{
PFM_REG_BUFFER
,
0
,
NULL
,
NULL
,
{
RDEP
(
2
)
|
RDEP
(
3
),
0UL
,
0UL
,
0UL
},
{
RDEP
(
11
),
0UL
,
0UL
,
0UL
}},
{
PFM_REG_NONE
,
0
,
NULL
,
NULL
,
{
0
,},
{
0
,}},
/* end marker */
};
static
int
pfm_mck_pmc_check
(
struct
task_struct
*
task
,
unsigned
int
cnum
,
unsigned
long
*
val
,
struct
pt_regs
*
regs
)
{
struct
thread_struct
*
th
=
&
task
->
thread
;
pfm_context_t
*
ctx
=
task
->
thread
.
pfm_context
;
int
ret
=
0
,
check_case1
=
0
;
unsigned
long
val8
=
0
,
val14
=
0
,
val13
=
0
;
/*
* we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if
(
cnum
==
13
&&
(
*
val
&
(
0xfUL
<<
45
))
&&
ctx
->
ctx_fl_using_dbreg
==
0
)
{
/* don't mix debug with perfmon */
if
((
task
->
thread
.
flags
&
IA64_THREAD_DBG_VALID
)
!=
0
)
return
-
EINVAL
;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret
=
pfm_write_ibr_dbr
(
1
,
task
,
NULL
,
0
,
regs
);
if
(
ret
)
return
ret
;
}
/*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are (fl_using_dbreg==0) to avoid picking up stale information.
*/
if
(
cnum
==
14
&&
((
*
val
&
0x2222
)
!=
0x2222
)
&&
ctx
->
ctx_fl_using_dbreg
==
0
)
{
/* don't mix debug with perfmon */
if
((
task
->
thread
.
flags
&
IA64_THREAD_DBG_VALID
)
!=
0
)
return
-
EINVAL
;
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret
=
pfm_write_ibr_dbr
(
0
,
task
,
NULL
,
0
,
regs
);
if
(
ret
)
return
ret
;
}
switch
(
cnum
)
{
case
4
:
*
val
|=
1UL
<<
23
;
/* force power enable bit */
break
;
case
8
:
val8
=
*
val
;
val13
=
th
->
pmc
[
13
];
val14
=
th
->
pmc
[
14
];
check_case1
=
1
;
break
;
case
13
:
val8
=
th
->
pmc
[
8
];
val13
=
*
val
;
val14
=
th
->
pmc
[
14
];
check_case1
=
1
;
break
;
case
14
:
val8
=
th
->
pmc
[
13
];
val13
=
th
->
pmc
[
13
];
val14
=
*
val
;
check_case1
=
1
;
break
;
}
/* check illegal configuration which can produce inconsistencies in tagging
* i-side events in L1D and L2 caches
*/
if
(
check_case1
)
{
ret
=
((
val13
>>
45
)
&
0xf
)
==
0
&&
((
val8
&
0x1
)
==
0
)
&&
((((
val14
>>
1
)
&
0x3
)
==
0x2
||
((
val14
>>
1
)
&
0x3
)
==
0x0
)
||
(((
val14
>>
4
)
&
0x3
)
==
0x2
||
((
val14
>>
4
)
&
0x3
)
==
0x0
));
if
(
ret
)
printk
(
"perfmon: failure check_case1
\n
"
);
}
return
ret
?
-
EINVAL
:
0
;
}
arch/ia64/kernel/process.c
View file @
8beb1642
...
...
@@ -325,6 +325,11 @@ copy_thread (int nr, unsigned long clone_flags,
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;

+	/* stop some PSR bits from being inherited: */
+	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+				 & ~IA64_PSR_BITS_TO_CLEAR);
+
 	/*
 	 * NOTE: The calling convention considers all floating point
 	 * registers in the high partition (fph) to be scratch.  Since
...
...
arch/ia64/kernel/setup.c
View file @
8beb1642
...
...
@@ -455,10 +455,10 @@ c_stop (struct seq_file *m, void *v)
 }

 struct seq_operations cpuinfo_op = {
-	start:	c_start,
-	next:	c_next,
-	stop:	c_stop,
-	show:	show_cpuinfo
+	.start =	c_start,
+	.next =		c_next,
+	.stop =		c_stop,
+	.show =		show_cpuinfo
 };

 void
...
...
@@ -542,7 +542,18 @@ cpu_init (void)
 	extern char __per_cpu_end[];
+	int cpu = smp_processor_id();

-	my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
+	if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
+		panic("Per-cpu data area too big! (%Zu > %Zu)",
+		      __per_cpu_end - __per_cpu_start, PAGE_SIZE);
+
+	/*
+	 * On the BSP, the page allocator isn't initialized by the time we get here.  On
+	 * the APs, the bootmem allocator is no longer available...
+	 */
+	if (cpu == 0)
+		my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
+	else
+		my_cpu_data = (void *) get_free_page(GFP_KERNEL);
+
 	memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 	__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
 	my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
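For illustration, each CPU gets its own copy of the per-CPU template section, and __per_cpu_offset[cpu] records the delta between that copy and the linked (template) addresses, exactly the pointer arithmetic in the last lines above. A user-space sketch of the idea, with illustrative names:

	#include <stdio.h>
	#include <string.h>
	#include <stdlib.h>

	static char template_section[64];	/* stands in for __per_cpu_start..__per_cpu_end */
	static long per_cpu_offset[4];
	static char *per_cpu_copy[4];

	static void *per_cpu_ptr(void *tmpl_addr, int cpu)
	{
		return (char *) tmpl_addr + per_cpu_offset[cpu];
	}

	int main(void)
	{
		int cpu;

		for (cpu = 0; cpu < 4; cpu++) {
			per_cpu_copy[cpu] = malloc(sizeof(template_section));
			memcpy(per_cpu_copy[cpu], template_section, sizeof(template_section));
			per_cpu_offset[cpu] = per_cpu_copy[cpu] - template_section;
		}
		printf("CPU 2's copy of template offset 16 lives at %p\n",
		       per_cpu_ptr(template_section + 16, 2));
		return 0;
	}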
...
...
arch/ia64/kernel/signal.c
View file @
8beb1642
...
...
@@ -146,6 +146,7 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
if
(
from
->
si_code
<
0
)
{
if
(
__copy_to_user
(
to
,
from
,
sizeof
(
siginfo_t
)))
return
-
EFAULT
;
return
0
;
}
else
{
int
err
;
...
...
arch/ia64/kernel/smpboot.c
View file @
8beb1642
...
...
@@ -425,7 +425,7 @@ do_boot_cpu (int sapicid)
task_for_booting_cpu
=
idle
;
Dprintk
(
"Sending wakeup vector %u to AP 0x%x/0x%x.
\n
"
,
ap_wakeup_vector
,
cpu
,
sapicid
);
Dprintk
(
"Sending wakeup vector %
l
u to AP 0x%x/0x%x.
\n
"
,
ap_wakeup_vector
,
cpu
,
sapicid
);
platform_send_ipi
(
cpu
,
ap_wakeup_vector
,
IA64_IPI_DM_INT
,
0
);
...
...
@@ -537,7 +537,7 @@ smp_boot_cpus (void)
printk
(
"Before bogomips.
\n
"
);
if
(
!
cpucount
)
{
printk
(
KERN_
ERR
"Error
: only one processor found.
\n
"
);
printk
(
KERN_
WARNING
"Warning
: only one processor found.
\n
"
);
}
else
{
unsigned
long
bogosum
=
0
;
for
(
cpu
=
0
;
cpu
<
NR_CPUS
;
cpu
++
)
...
...
arch/ia64/kernel/time.c
View file @
8beb1642
...
...
@@ -41,21 +41,22 @@ do_profile (unsigned long ip)
 	extern unsigned long prof_cpu_mask;
 	extern char _stext;

-	if (prof_buffer && current->pid) {
-		ip -= (unsigned long) &_stext;
-		ip >>= prof_shift;
-		/*
-		 * Don't ignore out-of-bounds IP values silently, put them into the last
-		 * histogram slot, so if present, they will show up as a sharp peak.
-		 */
-		if (ip > prof_len - 1)
-			ip = prof_len - 1;
-
-		atomic_inc((atomic_t *) &prof_buffer[ip]);
-	}
+	if (!prof_buffer)
+		return;
+
+	if (!((1UL << smp_processor_id()) & prof_cpu_mask))
+		return;
+
+	ip -= (unsigned long) &_stext;
+	ip >>= prof_shift;
+	/*
+	 * Don't ignore out-of-bounds IP values silently, put them into the last
+	 * histogram slot, so if present, they will show up as a sharp peak.
+	 */
+	if (ip > prof_len - 1)
+		ip = prof_len - 1;
+
+	atomic_inc((atomic_t *) &prof_buffer[ip]);
 }
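For illustration, the profiler maps an instruction pointer to a histogram bucket by subtracting the start of kernel text and shifting by prof_shift, clamping out-of-range samples into the last slot. A small sketch with made-up values for _stext, prof_shift and prof_len:

	#include <stdio.h>

	int main(void)
	{
		unsigned long _stext     = 0xa0000000UL;
		unsigned long prof_shift = 4;		/* one bucket per 16 bytes of text */
		unsigned long prof_len   = 1024;	/* number of buckets */
		unsigned long ip         = 0xa0005af0UL;

		ip -= _stext;
		ip >>= prof_shift;
		if (ip > prof_len - 1)
			ip = prof_len - 1;	/* out-of-range samples pile up in the last slot */

		printf("sample lands in bucket %lu\n", ip);
		return 0;
	}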
/*
...
...
@@ -285,9 +286,9 @@ ia64_init_itm (void)
 }

 static struct irqaction timer_irqaction = {
-	handler:	timer_interrupt,
-	flags:		SA_INTERRUPT,
-	name:		"timer"
+	.handler =	timer_interrupt,
+	.flags =	SA_INTERRUPT,
+	.name =		"timer"
 };

 void __init
...
...
arch/ia64/kernel/traps.c
View file @
8beb1642
...
...
@@ -93,9 +93,9 @@ die (const char *str, struct pt_regs *regs, long err)
 		int lock_owner;
 		int lock_owner_depth;
 	} die = {
-		lock:			SPIN_LOCK_UNLOCKED,
-		lock_owner:		-1,
-		lock_owner_depth:	0
+		.lock =			SPIN_LOCK_UNLOCKED,
+		.lock_owner =		-1,
+		.lock_owner_depth =	0
 	};

 	if (die.lock_owner != smp_processor_id()) {
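For illustration, die() keeps the lock owner and a depth counter next to the spinlock so a CPU that oopses again while already printing an oops bumps the depth instead of deadlocking on its own lock. A stand-alone sketch of the pattern with the kernel primitives stubbed out:

	#include <stdio.h>

	static struct {
		int lock;		/* stand-in for spinlock_t */
		int lock_owner;
		int lock_owner_depth;
	} die_lock = { .lock = 0, .lock_owner = -1, .lock_owner_depth = 0 };

	static int  smp_processor_id(void) { return 0; }	/* stub */
	static void spin_lock(int *l)      { *l = 1; }		/* stub */

	static void die_enter(void)
	{
		if (die_lock.lock_owner != smp_processor_id()) {
			spin_lock(&die_lock.lock);		/* first entry on this CPU */
			die_lock.lock_owner = smp_processor_id();
			die_lock.lock_owner_depth = 0;
		}
		die_lock.lock_owner_depth++;			/* nested oops: no second lock */
		printf("die(): depth now %d\n", die_lock.lock_owner_depth);
	}

	int main(void)
	{
		die_enter();	/* initial oops */
		die_enter();	/* recursive oops while handling the first one */
		return 0;
	}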
...
...
@@ -435,7 +435,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 	unsigned long code, error = isr;
 	struct siginfo siginfo;
 	char buf[128];
-	int result;
+	int result, sig;
 	static const char *reason[] = {
 		"IA-64 Illegal Operation fault", "IA-64 Privileged Operation fault",
...
...
@@ -479,6 +479,30 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
 		break;

+	      case 26: /* NaT Consumption */
+		if (user_mode(regs)) {
+			if (((isr >> 4) & 0xf) == 2) {
+				/* NaT page consumption */
+				sig = SIGSEGV;
+				code = SEGV_ACCERR;
+			} else {
+				/* register NaT consumption */
+				sig = SIGILL;
+				code = ILL_ILLOPN;
+			}
+			siginfo.si_signo = sig;
+			siginfo.si_code = code;
+			siginfo.si_errno = 0;
+			siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+			siginfo.si_imm = vector;
+			siginfo.si_flags = __ISR_VALID;
+			siginfo.si_isr = isr;
+			force_sig_info(sig, &siginfo, current);
+			return;
+		} else if (done_with_exception(regs))
+			return;
+		sprintf(buf, "NaT consumption");
+		break;
+
 	      case 31: /* Unsupported Data Reference */
 		if (user_mode(regs)) {
 			siginfo.si_signo = SIGILL;
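For illustration, the handler distinguishes a NaT page consumption (SIGSEGV) from a register NaT consumption (SIGILL) by a 4-bit field extracted from the interruption status register as (isr >> 4) & 0xf. A tiny sketch of that field test (the isr value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long isr = 0x20UL;		/* field at bits 7:4 == 2 */
		unsigned int field = (isr >> 4) & 0xf;

		if (field == 2)
			printf("NaT page consumption -> SIGSEGV/SEGV_ACCERR\n");
		else
			printf("register NaT consumption -> SIGILL/ILL_ILLOPN\n");
		return 0;
	}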
...
...
@@ -491,7 +515,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
force_sig_info
(
SIGILL
,
&
siginfo
,
current
);
return
;
}
sprintf
(
buf
,
(
vector
==
26
)
?
"NaT consumption"
:
"Unsupported data reference"
);
sprintf
(
buf
,
"Unsupported data reference"
);
break
;
case
29
:
/* Debug */
...
...
@@ -508,16 +532,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
if
(
ia64_psr
(
regs
)
->
is
==
0
)
ifa
=
regs
->
cr_iip
;
#endif
siginfo
.
si_addr
=
(
void
*
)
ifa
;
break
;
case
35
:
siginfo
.
si_code
=
TRAP_BRANCH
;
break
;
case
36
:
siginfo
.
si_code
=
TRAP_TRACE
;
break
;
case
35
:
siginfo
.
si_code
=
TRAP_BRANCH
;
ifa
=
0
;
break
;
case
36
:
siginfo
.
si_code
=
TRAP_TRACE
;
ifa
=
0
;
break
;
}
siginfo
.
si_signo
=
SIGTRAP
;
siginfo
.
si_errno
=
0
;
siginfo
.
si_flags
=
0
;
siginfo
.
si_isr
=
0
;
siginfo
.
si_addr
=
0
;
siginfo
.
si_addr
=
(
void
*
)
ifa
;
siginfo
.
si_imm
=
0
;
force_sig_info
(
SIGTRAP
,
&
siginfo
,
current
);
return
;
...
...
arch/ia64/kernel/unwind.c
View file @
8beb1642
...
...
@@ -140,13 +140,13 @@ static struct {
}
stat
;
# endif
}
unw
=
{
tables:
&
unw
.
kernel_table
,
lock:
SPIN_LOCK_UNLOCKED
,
save_order:
{
.
tables
=
&
unw
.
kernel_table
,
.
lock
=
SPIN_LOCK_UNLOCKED
,
.
save_order
=
{
UNW_REG_RP
,
UNW_REG_PFS
,
UNW_REG_PSP
,
UNW_REG_PR
,
UNW_REG_UNAT
,
UNW_REG_LC
,
UNW_REG_FPSR
,
UNW_REG_PRI_UNAT_GR
},
preg_index:
{
.
preg_index
=
{
struct_offset
(
struct
unw_frame_info
,
pri_unat_loc
)
/
8
,
/* PRI_UNAT_GR */
struct_offset
(
struct
unw_frame_info
,
pri_unat_loc
)
/
8
,
/* PRI_UNAT_MEM */
struct_offset
(
struct
unw_frame_info
,
bsp_loc
)
/
8
,
...
...
@@ -189,9 +189,9 @@ static struct {
struct_offset
(
struct
unw_frame_info
,
fr_loc
[
30
-
16
])
/
8
,
struct_offset
(
struct
unw_frame_info
,
fr_loc
[
31
-
16
])
/
8
,
},
hash
:
{
[
0
...
UNW_HASH_SIZE
-
1
]
=
-
1
},
.
hash
=
{
[
0
...
UNW_HASH_SIZE
-
1
]
=
-
1
},
#if UNW_DEBUG
preg_name:
{
.
preg_name
=
{
"pri_unat_gr"
,
"pri_unat_mem"
,
"bsp"
,
"bspstore"
,
"ar.pfs"
,
"ar.rnat"
,
"psp"
,
"rp"
,
"r4"
,
"r5"
,
"r6"
,
"r7"
,
"ar.unat"
,
"pr"
,
"ar.lc"
,
"ar.fpsr"
,
...
...
@@ -634,8 +634,8 @@ alloc_spill_area (unsigned long *offp, unsigned long regsize,
for
(
reg
=
hi
;
reg
>=
lo
;
--
reg
)
{
if
(
reg
->
where
==
UNW_WHERE_SPILL_HOME
)
{
reg
->
where
=
UNW_WHERE_PSPREL
;
reg
->
val
=
0x10
-
*
offp
;
*
offp
+=
regsize
;
*
offp
-=
regsize
;
reg
->
val
=
*
offp
;
}
}
}
...
...
@@ -814,7 +814,8 @@ desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *s
}
for
(
i
=
0
;
i
<
20
;
++
i
)
{
if
((
frmask
&
1
)
!=
0
)
{
set_reg
(
sr
->
curr
.
reg
+
UNW_REG_F2
+
i
,
UNW_WHERE_SPILL_HOME
,
int
base
=
(
i
<
4
)
?
UNW_REG_F2
:
UNW_REG_F16
-
4
;
set_reg
(
sr
->
curr
.
reg
+
base
+
i
,
UNW_WHERE_SPILL_HOME
,
sr
->
region_start
+
sr
->
region_len
-
1
,
0
);
sr
->
any_spills
=
1
;
}
...
...
arch/ia64/lib/Makefile
View file @
8beb1642
...
...
@@ -9,12 +9,12 @@ export-objs := io.o swiotlb.o
obj-y
:=
__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o
\
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
\
checksum.o clear_page.o csum_partial_copy.o copy_page.o
\
c
opy_user.o c
lear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o
\
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o
\
flush.o io.o ip_fast_csum.o do_csum.o
\
mem
cpy.o mem
set.o strlen.o swiotlb.o
memset.o strlen.o swiotlb.o
obj-$(CONFIG_ITANIUM)
+=
copy_page.o
obj-$(CONFIG_MCKINLEY)
+=
copy_page_mck.o
obj-$(CONFIG_ITANIUM)
+=
copy_page.o
copy_user.o memcpy.o
obj-$(CONFIG_MCKINLEY)
+=
copy_page_mck.o
memcpy_mck.o
IGNORE_FLAGS_OBJS
=
__divsi3.o __udivsi3.o __modsi3.o __umodsi3.o
\
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
...
...
arch/ia64/lib/copy_user.S
View file @
8beb1642
...
...
@@ -237,15 +237,17 @@ GLOBAL_ENTRY(__copy_user)
 .copy_user_bit##rshift:						\
 1:								\
 	EX(.failure_out,(EPI) st8 [dst1]=tmp,8);		\
-(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift;	\
-	EX(3f,(p16) ld8 val1[0]=[src1],8);			\
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
+	EX(3f,(p16) ld8 val1[1]=[src1],8);			\
 (p16)	mov val1[0]=r0;						\
 	br.ctop.dptk 1b;					\
 	;;							\
 	br.cond.sptk.many .diff_align_do_tail;			\
 2:								\
 (EPI)	st8 [dst1]=tmp,8;					\
-(EPI_1) shrp tmp=val1[PIPE_DEPTH-3],val1[PIPE_DEPTH-2],rshift;	\
+(EPI_1) shrp tmp=val1[PIPE_DEPTH-2],val1[PIPE_DEPTH-1],rshift;	\
+3:								\
+(p16)	mov val1[1]=r0;						\
 (p16)	mov val1[0]=r0;						\
 	br.ctop.dptk 2b;					\
 	;;							\
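For illustration, the misaligned copy path merges two adjacent 8-byte source words with shrp (shift right pair) to produce one aligned destination word per iteration. Roughly the C equivalent of that merge, with a made-up misalignment:

	#include <stdio.h>

	/* result = low 64 bits of the 128-bit value hi:lo shifted right by "shift" bits,
	 * valid for 0 < shift < 64 */
	static unsigned long shrp(unsigned long hi, unsigned long lo, unsigned int shift)
	{
		return (lo >> shift) | (hi << (64 - shift));
	}

	int main(void)
	{
		unsigned char src[16];
		unsigned int i, misalign = 3;	/* source starts 3 bytes past an 8-byte boundary */

		for (i = 0; i < sizeof(src); i++)
			src[i] = i;

		unsigned long lo = *(unsigned long *) &src[0];	/* little-endian loads, as on ia64 */
		unsigned long hi = *(unsigned long *) &src[8];
		printf("merged word: 0x%016lx\n", shrp(hi, lo, misalign * 8));
		return 0;
	}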
...
...
arch/ia64/lib/io.c
View file @
8beb1642
...
...
@@ -87,6 +87,12 @@ ia64_outl (unsigned int val, unsigned long port)
__ia64_outl
(
val
,
port
);
}
void
ia64_mmiob
(
void
)
{
__ia64_mmiob
();
}
/* define aliases: */
asm
(
".global __ia64_inb, __ia64_inw, __ia64_inl"
);
...
...
@@ -99,4 +105,7 @@ asm ("__ia64_outb = ia64_outb");
asm
(
"__ia64_outw = ia64_outw"
);
asm
(
"__ia64_outl = ia64_outl"
);
asm
(
".global __ia64_mmiob"
);
asm
(
"__ia64_mmiob = ia64_mmiob"
);
#endif
/* CONFIG_IA64_GENERIC */
arch/ia64/lib/memcpy_mck.S
0 → 100644
View file @
8beb1642
/*
*
Itanium
2
-
optimized
version
of
memcpy
and
copy_user
function
*
*
Inputs
:
*
in0
:
destination
address
*
in1
:
source
address
*
in2
:
number
of
bytes
to
copy
*
Output
:
*
0
if
success
,
or
number
of
byte
NOT
copied
if
error
occurred
.
*
*
Copyright
(
C
)
2002
Intel
Corp
.
*
Copyright
(
C
)
2002
Ken
Chen
<
kenneth
.
w
.
chen
@
intel
.
com
>
*/
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
#if __GNUC__ >= 3
# define EK(y...) EX(y)
#else
# define EK(y,x...) x
#endif
GLOBAL_ENTRY
(
bcopy
)
.
regstk
3
,
0
,
0
,
0
mov
r8
=
in0
mov
in0
=
in1
;;
mov
in1
=
r8
;;
END
(
bcopy
)
/*
McKinley
specific
optimization
*/
#define retval r8
#define saved_pfs r31
#define saved_lc r10
#define saved_pr r11
#define saved_in0 r14
#define saved_in1 r15
#define saved_in2 r16
#define src0 r2
#define src1 r3
#define dst0 r17
#define dst1 r18
#define cnt r9
/*
r19
-
r30
are
temp
for
each
code
section
*/
#define PREFETCH_DIST 8
#define src_pre_mem r19
#define dst_pre_mem r20
#define src_pre_l2 r21
#define dst_pre_l2 r22
#define t1 r23
#define t2 r24
#define t3 r25
#define t4 r26
#define t5 t1 // alias!
#define t6 t2 // alias!
#define t7 t3 // alias!
#define n8 r27
#define t9 t5 // alias!
#define t10 t4 // alias!
#define t11 t7 // alias!
#define t12 t6 // alias!
#define t14 t10 // alias!
#define t13 r28
#define t15 r29
#define tmp r30
/*
defines
for
long_copy
block
*/
#define A 0
#define B (PREFETCH_DIST)
#define C (B + PREFETCH_DIST)
#define D (C + 1)
#define N (D + 1)
#define Nrot ((N + 7) & ~7)
/*
alias
*/
#define in0 r32
#define in1 r33
#define in2 r34
GLOBAL_ENTRY
(
memcpy
)
and
r28
=
0x7
,
in0
and
r29
=
0x7
,
in1
mov
f6
=
f0
br.cond.sptk
.
common_code
;;
GLOBAL_ENTRY
(
__copy_user
)
.
prologue
//
check
dest
alignment
and
r28
=
0x7
,
in0
and
r29
=
0x7
,
in1
mov
f6
=
f1
mov
saved_in0
=
in0
//
save
dest
pointer
mov
saved_in1
=
in1
//
save
src
pointer
mov
saved_in2
=
in2
//
save
len
;;
.
common_code
:
cmp.gt
p15
,
p0
=
8
,
in2
//
check
for
small
size
cmp.ne
p13
,
p0
=
0
,
r28
//
check
dest
alignment
cmp.ne
p14
,
p0
=
0
,
r29
//
check
src
alignment
add
src0
=
0
,
in1
sub
r30
=
8
,
r28
//
for
.
align_dest
mov
retval
=
r0
//
initialize
return
value
;;
add
dst0
=
0
,
in0
add
dst1
=
1
,
in0
//
dest
odd
index
cmp.le
p6
,
p0
=
1
,
r30
//
for
.
align_dest
(
p15
)
br.cond.dpnt
.
memcpy_short
(
p13
)
br.cond.dpnt
.
align_dest
(
p14
)
br.cond.dpnt
.
unaligned_src
;;
//
both
dest
and
src
are
aligned
on
8
-
byte
boundary
.
aligned_src
:
.
save
ar
.
pfs
,
saved_pfs
alloc
saved_pfs
=
ar
.
pfs
,
3
,
Nrot
-
3
,
0
,
Nrot
.
save
pr
,
saved_pr
mov
saved_pr
=
pr
shr.u
cnt
=
in2
,
7
//
this
much
cache
line
;;
cmp.lt
p6
,
p0
=
2
*
PREFETCH_DIST
,
cnt
cmp.lt
p7
,
p8
=
1
,
cnt
.
save
ar
.
lc
,
saved_lc
mov
saved_lc
=
ar
.
lc
.
body
add
cnt
=-
1
,
cnt
add
src_pre_mem
=
0
,
in1
//
prefetch
src
pointer
add
dst_pre_mem
=
0
,
in0
//
prefetch
dest
pointer
;;
(
p7
)
mov
ar
.
lc
=
cnt
//
prefetch
count
(
p8
)
mov
ar
.
lc
=
r0
(
p6
)
br.cond.dpnt
.
long_copy
;;
.
prefetch
:
lfetch.fault
[
src_pre_mem
],
128
lfetch.fault.excl
[
dst_pre_mem
],
128
br.cloop.dptk.few
.
prefetch
;;
.
medium_copy
:
and
tmp
=
31
,
in2
//
copy
length
after
iteration
shr.u
r29
=
in2
,
5
//
number
of
32
-
byte
iteration
add
dst1
=
8
,
dst0
//
2
nd
dest
pointer
;;
add
cnt
=-
1
,
r29
//
ctop
iteration
adjustment
cmp.eq
p10
,
p0
=
r29
,
r0
//
do
we
really
need
to
loop
?
add
src1
=
8
,
src0
//
2
nd
src
pointer
cmp.le
p6
,
p0
=
8
,
tmp
;;
cmp.le
p7
,
p0
=
16
,
tmp
mov
ar
.
lc
=
cnt
//
loop
setup
cmp.eq
p16
,
p17
=
r0
,
r0
mov
ar
.
ec
=
2
(
p10
)
br.dpnt.few
.
aligned_src_tail
;;
.
align
32
1
:
EX
(.
ex_handler
,
(
p16
)
ld8
r34
=[
src0
],
16
)
EK
(.
ex_handler
,
(
p16
)
ld8
r38
=[
src1
],
16
)
EX
(.
ex_handler
,
(
p17
)
st8
[
dst0
]=
r33
,
16
)
EK
(.
ex_handler
,
(
p17
)
st8
[
dst1
]=
r37
,
16
)
;;
EX
(.
ex_handler
,
(
p16
)
ld8
r32
=[
src0
],
16
)
EK
(.
ex_handler
,
(
p16
)
ld8
r36
=[
src1
],
16
)
EX
(.
ex_handler
,
(
p16
)
st8
[
dst0
]=
r34
,
16
)
EK
(.
ex_handler
,
(
p16
)
st8
[
dst1
]=
r38
,
16
)
br.ctop.dptk.few
1
b
;;
.
aligned_src_tail
:
EX
(.
ex_handler
,
(
p6
)
ld8
t1
=[
src0
])
mov
ar
.
lc
=
saved_lc
mov
ar
.
pfs
=
saved_pfs
EX
(.
ex_hndlr_s
,
(
p7
)
ld8
t2
=[
src1
],
8
)
cmp.le
p8
,
p0
=
24
,
tmp
and
r21
=-
8
,
tmp
;;
EX
(.
ex_hndlr_s
,
(
p8
)
ld8
t3
=[
src1
])
EX
(.
ex_handler
,
(
p6
)
st8
[
dst0
]=
t1
)
//
store
byte
1
and
in2
=
7
,
tmp
//
remaining
length
EX
(.
ex_hndlr_d
,
(
p7
)
st8
[
dst1
]=
t2
,
8
)
//
store
byte
2
add
src0
=
src0
,
r21
//
setting
up
src
pointer
add
dst0
=
dst0
,
r21
//
setting
up
dest
pointer
;;
EX
(.
ex_handler
,
(
p8
)
st8
[
dst1
]=
t3
)
//
store
byte
3
mov
pr
=
saved_pr
,-
1
br.dptk.many
.
memcpy_short
;;
/*
code
taken
from
copy_page_mck
*/
.
long_copy
:
.
rotr
v
[
2
*
PREFETCH_DIST
]
.
rotp
p
[
N
]
mov
src_pre_mem
=
src0
mov
pr
.
rot
=
0x10000
mov
ar
.
ec
=
1
//
special
unrolled
loop
mov
dst_pre_mem
=
dst0
add
src_pre_l2
=
8
*
8
,
src0
add
dst_pre_l2
=
8
*
8
,
dst0
;;
add
src0
=
8
,
src_pre_mem
//
first
t1
src
mov
ar
.
lc
=
2
*
PREFETCH_DIST
-
1
shr.u
cnt
=
in2
,
7
//
number
of
lines
add
src1
=
3
*
8
,
src_pre_mem
//
first
t3
src
add
dst0
=
8
,
dst_pre_mem
//
first
t1
dst
add
dst1
=
3
*
8
,
dst_pre_mem
//
first
t3
dst
;;
and
tmp
=
127
,
in2
//
remaining
bytes
after
this
block
add
cnt
=
-(
2
*
PREFETCH_DIST
)
-
1
,
cnt
//
same
as
.
line_copy
loop
,
but
with
all
predicated
-
off
instructions
removed
:
.
prefetch_loop
:
EX
(.
ex_hndlr_lcpy_1
,
(
p
[
A
])
ld8
v
[
A
]
=
[
src_pre_mem
],
128
)
//
M0
EK
(.
ex_hndlr_lcpy_1
,
(
p
[
B
])
st8
[
dst_pre_mem
]
=
v
[
B
],
128
)
//
M2
br.ctop.sptk
.
prefetch_loop
;;
cmp.eq
p16
,
p0
=
r0
,
r0
//
reset
p16
to
1
mov
ar
.
lc
=
cnt
mov
ar
.
ec
=
N
//
#
of
stages
in
pipeline
;;
.
line_copy
:
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t2
=
[
src0
],
3
*
8
)
//
M0
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t4
=
[
src1
],
3
*
8
)
//
M1
EX
(.
ex_handler_lcpy
,
(
p
[
B
])
st8
[
dst_pre_mem
]
=
v
[
B
],
128
)
//
M2
prefetch
dst
from
memory
EK
(.
ex_handler_lcpy
,
(
p
[
D
])
st8
[
dst_pre_l2
]
=
n8
,
128
)
//
M3
prefetch
dst
from
L2
;;
EX
(.
ex_handler_lcpy
,
(
p
[
A
])
ld8
v
[
A
]
=
[
src_pre_mem
],
128
)
//
M0
prefetch
src
from
memory
EK
(.
ex_handler_lcpy
,
(
p
[
C
])
ld8
n8
=
[
src_pre_l2
],
128
)
//
M1
prefetch
src
from
L2
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t1
,
8
)
//
M2
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t3
,
8
)
//
M3
;;
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t5
=
[
src0
],
8
)
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t7
=
[
src1
],
3
*
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t2
,
3
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t4
,
3
*
8
)
;;
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t6
=
[
src0
],
3
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t10
=
[
src1
],
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t5
,
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t7
,
3
*
8
)
;;
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t9
=
[
src0
],
3
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t11
=
[
src1
],
3
*
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t6
,
3
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t10
,
8
)
;;
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t12
=
[
src0
],
8
)
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t14
=
[
src1
],
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t9
,
3
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t11
,
3
*
8
)
;;
EX
(.
ex_handler
,
(
p
[
D
])
ld8
t13
=
[
src0
],
4
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
ld8
t15
=
[
src1
],
4
*
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t12
,
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t14
,
8
)
;;
EX
(.
ex_handler
,
(
p
[
C
])
ld8
t1
=
[
src0
],
8
)
EK
(.
ex_handler
,
(
p
[
C
])
ld8
t3
=
[
src1
],
8
)
EX
(.
ex_handler
,
(
p
[
D
])
st8
[
dst0
]
=
t13
,
4
*
8
)
EK
(.
ex_handler
,
(
p
[
D
])
st8
[
dst1
]
=
t15
,
4
*
8
)
br.ctop.sptk
.
line_copy
;;
add
dst0
=-
8
,
dst0
add
src0
=-
8
,
src0
mov
in2
=
tmp
.
restore
sp
br.sptk.many
.
medium_copy
;;
#define BLOCK_SIZE 128*32
#define blocksize r23
#define curlen r24
//
dest
is
on
8
-
byte
boundary
,
src
is
not
.
We
need
to
do
//
ld8
-
ld8
,
shrp
,
then
st8
.
Max
8
byte
copy
per
cycle
.
.
unaligned_src
:
.
prologue
.
save
ar
.
pfs
,
saved_pfs
alloc
saved_pfs
=
ar
.
pfs
,
3
,
5
,
0
,
8
.
save
ar
.
lc
,
saved_lc
mov
saved_lc
=
ar
.
lc
.
save
pr
,
saved_pr
mov
saved_pr
=
pr
.
body
.
4k_block
:
mov
saved_in0
=
dst0
//
need
to
save
all
input
arguments
mov
saved_in2
=
in2
mov
blocksize
=
BLOCK_SIZE
;;
cmp.lt
p6
,
p7
=
blocksize
,
in2
mov
saved_in1
=
src0
;;
(
p6
)
mov
in2
=
blocksize
;;
shr.u
r21
=
in2
,
7
//
this
much
cache
line
shr.u
r22
=
in2
,
4
//
number
of
16
-
byte
iteration
and
curlen
=
15
,
in2
//
copy
length
after
iteration
and
r30
=
7
,
src0
//
source
alignment
;;
cmp.lt
p7
,
p8
=
1
,
r21
add
cnt
=-
1
,
r21
;;
add
src_pre_mem
=
0
,
src0
//
prefetch
src
pointer
add
dst_pre_mem
=
0
,
dst0
//
prefetch
dest
pointer
and
src0
=-
8
,
src0
//
1
st
src
pointer
(
p7
)
mov
ar
.
lc
=
r21
(
p8
)
mov
ar
.
lc
=
r0
;;
.
align
32
1
:
lfetch.fault
[
src_pre_mem
],
128
lfetch.fault.excl
[
dst_pre_mem
],
128
br.cloop.dptk.few
1
b
;;
shladd
dst1
=
r22
,
3
,
dst0
//
2
nd
dest
pointer
shladd
src1
=
r22
,
3
,
src0
//
2
nd
src
pointer
cmp.eq
p8
,
p9
=
r22
,
r0
//
do
we
really
need
to
loop
?
cmp.le
p6
,
p7
=
8
,
curlen
; // have at least 8 byte remaining?
add
cnt
=-
1
,
r22
//
ctop
iteration
adjustment
;;
EX
(.
ex_handler
,
(
p9
)
ld8
r33
=[
src0
],
8
)
//
loop
primer
EK
(.
ex_handler
,
(
p9
)
ld8
r37
=[
src1
],
8
)
(
p8
)
br.dpnt.few
.
noloop
;;
//
The
jump
address
is
calculated
based
on
src
alignment
.
The
COPYU
//
macro
below
need
to
confine
its
size
to
power
of
two
,
so
an
entry
//
can
be
caulated
using
shl
instead
of
an
expensive
multiply
.
The
//
size
is
then
hard
coded
by
the
following
#
define
to
match
the
//
actual
size
.
This
make
it
somewhat
tedious
when
COPYU
macro
gets
//
changed
and
this
need
to
be
adjusted
to
match
.
#define LOOP_SIZE 6
1
:
mov
r29
=
ip
//
jmp_table
thread
mov
ar
.
lc
=
cnt
;;
add
r29
=
.
jump_table
-
1
b
-
(
.
jmp1
-
.
jump_table
),
r29
shl
r28
=
r30
,
LOOP_SIZE
//
jmp_table
thread
mov
ar
.
ec
=
2
//
loop
setup
;;
add
r29
=
r29
,
r28
//
jmp_table
thread
cmp.eq
p16
,
p17
=
r0
,
r0
;;
mov
b6
=
r29
//
jmp_table
thread
;;
br.cond.sptk.few
b6
//
for
8
-
15
byte
case
//
We
will
skip
the
loop
,
but
need
to
replicate
the
side
effect
//
that
the
loop
produces
.
.
noloop
:
EX
(.
ex_handler
,
(
p6
)
ld8
r37
=[
src1
],
8
)
add
src0
=
8
,
src0
(
p6
)
shl
r25
=
r30
,
3
;;
EX
(.
ex_handler
,
(
p6
)
ld8
r27
=[
src1
])
(
p6
)
shr.u
r28
=
r37
,
r25
(
p6
)
sub
r26
=
64
,
r25
;;
(
p6
)
shl
r27
=
r27
,
r26
;;
(
p6
)
or
r21
=
r28
,
r27
.
unaligned_src_tail
:
/*
check
if
we
have
more
than
blocksize
to
copy
,
if
so
go
back
*/
cmp.gt
p8
,
p0
=
saved_in2
,
blocksize
;;
(
p8
)
add
dst0
=
saved_in0
,
blocksize
(
p8
)
add
src0
=
saved_in1
,
blocksize
(
p8
)
sub
in2
=
saved_in2
,
blocksize
(
p8
)
br.dpnt
.4
k_block
;;
/*
we
have
up
to
15
byte
to
copy
in
the
tail
.
*
part
of
work
is
already
done
in
the
jump
table
code
*
we
are
at
the
following
state
.
*
src
side
:
*
*
xxxxxx
xx
<-----
r21
has
xxxxxxxx
already
*
--------
--------
--------
*
0
8
16
*
^
*
|
*
src1
*
*
dst
*
--------
--------
--------
*
^
*
|
*
dst1
*/
EX
(.
ex_handler
,
(
p6
)
st8
[
dst1
]=
r21
,
8
)
//
more
than
8
byte
to
copy
(
p6
)
add
curlen
=-
8
,
curlen
//
update
length
mov
ar
.
pfs
=
saved_pfs
;;
mov
ar
.
lc
=
saved_lc
mov
pr
=
saved_pr
,-
1
mov
in2
=
curlen
//
remaining
length
mov
dst0
=
dst1
//
dest
pointer
add
src0
=
src1
,
r30
//
forward
by
src
alignment
;;
//
7
byte
or
smaller
.
.
memcpy_short
:
cmp.le
p8
,
p9
=
1
,
in2
cmp.le
p10
,
p11
=
2
,
in2
cmp.le
p12
,
p13
=
3
,
in2
cmp.le
p14
,
p15
=
4
,
in2
add
src1
=
1
,
src0
//
second
src
pointer
add
dst1
=
1
,
dst0
//
second
dest
pointer
;;
EX
(.
ex_handler_short
,
(
p8
)
ld1
t1
=[
src0
],
2
)
EK
(.
ex_handler_short
,
(
p10
)
ld1
t2
=[
src1
],
2
)
(
p9
)
br.ret.dpnt
rp
//
0
byte
copy
;;
EX
(.
ex_handler_short
,
(
p8
)
st1
[
dst0
]=
t1
,
2
)
EK
(.
ex_handler_short
,
(
p10
)
st1
[
dst1
]=
t2
,
2
)
(
p11
)
br.ret.dpnt
rp
//
1
byte
copy
EX
(.
ex_handler_short
,
(
p12
)
ld1
t3
=[
src0
],
2
)
EK
(.
ex_handler_short
,
(
p14
)
ld1
t4
=[
src1
],
2
)
(
p13
)
br.ret.dpnt
rp
//
2
byte
copy
;;
cmp.le
p6
,
p7
=
5
,
in2
cmp.le
p8
,
p9
=
6
,
in2
cmp.le
p10
,
p11
=
7
,
in2
EX
(.
ex_handler_short
,
(
p12
)
st1
[
dst0
]=
t3
,
2
)
EK
(.
ex_handler_short
,
(
p14
)
st1
[
dst1
]=
t4
,
2
)
(
p15
)
br.ret.dpnt
rp
//
3
byte
copy
;;
EX
(.
ex_handler_short
,
(
p6
)
ld1
t5
=[
src0
],
2
)
EK
(.
ex_handler_short
,
(
p8
)
ld1
t6
=[
src1
],
2
)
(
p7
)
br.ret.dpnt
rp
//
4
byte
copy
;;
EX
(.
ex_handler_short
,
(
p6
)
st1
[
dst0
]=
t5
,
2
)
EK
(.
ex_handler_short
,
(
p8
)
st1
[
dst1
]=
t6
,
2
)
(
p9
)
br.ret.dptk
rp
//
5
byte
copy
EX
(.
ex_handler_short
,
(
p10
)
ld1
t7
=[
src0
],
2
)
(
p11
)
br.ret.dptk
rp
//
6
byte
copy
;;
EX
(.
ex_handler_short
,
(
p10
)
st1
[
dst0
]=
t7
,
2
)
br.ret.dptk
rp
//
done
all
cases
/*
Align
dest
to
nearest
8
-
byte
boundary
.
We
know
we
have
at
*
least
7
bytes
to
copy
,
enough
to
crawl
to
8
-
byte
boundary
.
*
Actual
number
of
byte
to
crawl
depend
on
the
dest
alignment
.
*
7
byte
or
less
is
taken
care
at
.
memcpy_short
*
src0
-
source
even
index
*
src1
-
source
odd
index
*
dst0
-
dest
even
index
*
dst1
-
dest
odd
index
*
r30
-
distance
to
8
-
byte
boundary
*/
.
align_dest
:
add
src1
=
1
,
in1
//
source
odd
index
cmp.le
p7
,
p0
=
2
,
r30
//
for
.
align_dest
cmp.le
p8
,
p0
=
3
,
r30
//
for
.
align_dest
EX
(.
ex_handler_short
,
(
p6
)
ld1
t1
=[
src0
],
2
)
cmp.le
p9
,
p0
=
4
,
r30
//
for
.
align_dest
cmp.le
p10
,
p0
=
5
,
r30
;;
EX
(.
ex_handler_short
,
(
p7
)
ld1
t2
=[
src1
],
2
)
EK
(.
ex_handler_short
,
(
p8
)
ld1
t3
=[
src0
],
2
)
cmp.le
p11
,
p0
=
6
,
r30
EX
(.
ex_handler_short
,
(
p6
)
st1
[
dst0
]
=
t1
,
2
)
cmp.le
p12
,
p0
=
7
,
r30
;;
EX
(.
ex_handler_short
,
(
p9
)
ld1
t4
=[
src1
],
2
)
EK
(.
ex_handler_short
,
(
p10
)
ld1
t5
=[
src0
],
2
)
EX
(.
ex_handler_short
,
(
p7
)
st1
[
dst1
]
=
t2
,
2
)
EK
(.
ex_handler_short
,
(
p8
)
st1
[
dst0
]
=
t3
,
2
)
;;
EX
(.
ex_handler_short
,
(
p11
)
ld1
t6
=[
src1
],
2
)
EK
(.
ex_handler_short
,
(
p12
)
ld1
t7
=[
src0
],
2
)
cmp.eq
p6
,
p7
=
r28
,
r29
EX
(.
ex_handler_short
,
(
p9
)
st1
[
dst1
]
=
t4
,
2
)
EK
(.
ex_handler_short
,
(
p10
)
st1
[
dst0
]
=
t5
,
2
)
sub
in2
=
in2
,
r30
;;
EX
(.
ex_handler_short
,
(
p11
)
st1
[
dst1
]
=
t6
,
2
)
EK
(.
ex_handler_short
,
(
p12
)
st1
[
dst0
]
=
t7
)
add
dst0
=
in0
,
r30
//
setup
arguments
add
src0
=
in1
,
r30
(
p6
)
br.cond.dptk
.
aligned_src
(
p7
)
br.cond.dpnt
.
unaligned_src
;;
/*
main
loop
body
in
jump
table
format
*/
#define COPYU(shift) \
1
:
\
EX
(.
ex_handler
,
(
p16
)
ld8
r32
=[
src0
],
8
)
; /* 1 */ \
EK
(.
ex_handler
,
(
p16
)
ld8
r36
=[
src1
],
8
)
; \
(
p17
)
shrp
r35
=
r33
,
r34
,
shift
;; /* 1 */ \
EX
(.
ex_handler
,
(
p6
)
ld8
r22
=[
src1
])
; /* common, prime for tail section */ \
nop.m
0
; \
(
p16
)
shrp
r38
=
r36
,
r37
,
shift
; \
EX
(.
ex_handler
,
(
p17
)
st8
[
dst0
]=
r35
,
8
)
; /* 1 */ \
EK
(.
ex_handler
,
(
p17
)
st8
[
dst1
]=
r39
,
8
)
; \
br.ctop.dptk.few
1
b
;; \
(
p7
)
add
src1
=-
8
,
src1
; /* back out for <8 byte case */ \
shrp
r21
=
r22
,
r38
,
shift
; /* speculative work */ \
br.sptk.few
.
unaligned_src_tail
/*
branch
out
of
jump
table
*/
\
;;
.
align
32
.
jump_table
:
COPYU
(8)
//
unaligned
cases
.
jmp1
:
COPYU
(16)
COPYU
(24)
COPYU
(32)
COPYU
(40)
COPYU
(48)
COPYU
(56)
#undef A
#undef B
#undef C
#undef D
END
(
memcpy
)
/*
*
Due
to
lack
of
local
tag
support
in
gcc
2
.
x
assembler
,
it
is
not
clear
which
*
instruction
failed
in
the
bundle
.
The
exception
algorithm
is
that
we
*
first
figure
out
the
faulting
address
,
then
detect
if
there
is
any
*
progress
made
on
the
copy
,
if
so
,
redo
the
copy
from
last
known
copied
*
location
up
to
the
faulting
address
(
exclusive
)
.
In
the
copy_from_user
*
case
,
remaining
byte
in
kernel
buffer
will
be
zeroed
.
*
*
Take
copy_from_user
as
an
example
,
in
the
code
there
are
multiple
loads
*
in
a
bundle
and
those
multiple
loads
could
span
over
two
pages
,
the
*
faulting
address
is
calculated
as
page_round_down
(
max
(
src0
,
src1
))
.
*
This
is
based
on
knowledge
that
if
we
can
access
one
byte
in
a
page
,
we
*
can
access
any
byte
in
that
page
.
*
*
predicate
used
in
the
exception
handler
:
*
p6
-
p7
:
direction
*
p10
-
p11
:
src
faulting
addr
calculation
*
p12
-
p13
:
dst
faulting
addr
calculation
*/
#define A r19
#define B r20
#define C r21
#define D r22
#define F r28
#define memset_arg0 r32
#define memset_arg2 r33
#define saved_retval loc0
#define saved_rtlink loc1
#define saved_pfs_stack loc2
.ex_hndlr_s:
	add	src0=8,src0
	br.sptk .ex_handler
	;;
.ex_hndlr_d:
	add	dst0=8,dst0
	br.sptk .ex_handler
	;;
.ex_hndlr_lcpy_1:
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
	cmp.gtu	p10,p11=src_pre_mem,saved_in1
	cmp.gtu	p12,p13=dst_pre_mem,saved_in0
	;;
(p10)	add	src0=8,saved_in1
(p11)	mov	src0=saved_in1
(p12)	add	dst0=8,saved_in0
(p13)	mov	dst0=saved_in0
	br.sptk	.ex_handler
.ex_handler_lcpy:
	// in line_copy block, the preload addresses should always be ahead
	// of the other two src/dst pointers.  Furthermore, src1/dst1 should
	// always be ahead of src0/dst0.
	mov	src1=src_pre_mem
	mov	dst1=dst_pre_mem
.ex_handler:
	mov	pr=saved_pr,-1		// first restore pr, lc, and pfs
	mov	ar.lc=saved_lc
	mov	ar.pfs=saved_pfs
	;;
.ex_handler_short:	// fault occurred in these sections; didn't change pr, lc, pfs
	cmp.ltu	p6,p7=saved_in0,saved_in1	// get the copy direction
	cmp.ltu	p10,p11=src0,src1
	cmp.ltu	p12,p13=dst0,dst1
	fcmp.eq	p8,p0=f6,f0		// is it memcpy?
	mov	tmp=dst0
	;;
(p11)	mov	src1=src0		// pick the larger of the two
(p13)	mov	dst0=dst1		// make dst0 the smaller one
(p13)	mov	dst1=tmp		// and dst1 the larger one
	;;
(p6)	dep	F=r0,dst1,0,PAGE_SHIFT	// usr dst round down to page boundary
(p7)	dep	F=r0,src1,0,PAGE_SHIFT	// usr src round down to page boundary
	;;
(p6)	cmp.le	p14,p0=dst0,saved_in0	// no progress has been made on store
(p7)	cmp.le	p14,p0=src0,saved_in1	// no progress has been made on load
	mov	retval=saved_in2
(p8)	ld1	tmp=[src1]		// force an oops for memcpy call
(p8)	st1	[dst1]=r0		// force an oops for memcpy call
(p14)	br.ret.sptk.many rp
/*
 * The remaining bytes to copy are calculated as:
 *
 * A =	(faulting_addr - orig_src)	-> len to faulting ld address
 *	or
 *	(faulting_addr - orig_dst)	-> len to faulting st address
 * B =	(cur_dst - orig_dst)		-> len copied so far
 * C =	A - B				-> len that still needs to be copied
 * D =	orig_len - A			-> len that needs to be zeroed
 */
(p6)	sub	A = F, saved_in0
(p7)	sub	A = F, saved_in1
	clrrrb
	;;
	alloc	saved_pfs_stack=ar.pfs,3,3,3,0
	sub	B = dst0, saved_in0	// how many bytes copied so far
	;;
	sub	C = A, B
	sub	D = saved_in2, A
	;;
	cmp.gt	p8,p0=C,r0		// more than 1 byte?
	add	memset_arg0=saved_in0, A
(p6)	mov	memset_arg2=0		// copy_to_user should not call memset
(p7)	mov	memset_arg2=D		// copy_from_user needs to have kbuf zeroed
	mov	r8=0
	mov	saved_retval = D
	mov	saved_rtlink = b0

	add	out0=saved_in0, B
	add	out1=saved_in1, B
	mov	out2=C
(p8)	br.call.sptk.few b0=__copy_user	// recursive call
	;;

	add	saved_retval=saved_retval,r8	// above might return a non-zero value
	cmp.gt	p8,p0=memset_arg2,r0	// more than 1 byte?
	mov	out0=memset_arg0	// *s
	mov	out1=r0			// c
	mov	out2=memset_arg2	// n
(p8)	br.call.sptk.few b0=memset
	;;

	mov	retval=saved_retval
	mov	ar.pfs=saved_pfs_stack
	mov	b0=saved_rtlink
	br.ret.sptk.many rp

/* end of McKinley specific optimization */
END(__copy_user)
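The A/B/C/D bookkeeping above is easier to follow in C. The sketch below is illustrative only and is not part of the kernel sources; copy_user_recover() and its parameters are hypothetical names, and it assumes the faulting address has already been rounded down to a page boundary as described in the comment.

/* Illustrative-only C sketch of the fault-recovery arithmetic in .ex_handler above. */
#include <stddef.h>
#include <string.h>

static size_t
copy_user_recover (char *dst, const char *src, size_t orig_len,
		   size_t fault_addr, size_t orig_src, size_t orig_dst,
		   size_t cur_dst, int from_user)
{
	size_t A = fault_addr - (from_user ? orig_src : orig_dst);	/* len up to the fault */
	size_t B = cur_dst - orig_dst;					/* len copied so far */
	size_t C = A - B;						/* len still copyable */
	size_t D = orig_len - A;					/* len that cannot be copied */

	if (C > 0)
		memcpy(dst + B, src + B, C);	/* redo the copy up to the faulting page */
	if (from_user && D > 0)
		memset(dst + A, 0, D);		/* zero the tail of the kernel buffer */
	return D;				/* uncopied byte count, as returned in r8 */
}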
arch/ia64/lib/swiotlb.c
View file @
8beb1642
...
...
@@ -415,18 +415,20 @@ int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
	void *addr;
	unsigned long pci_addr;
	int i;

	if (direction == PCI_DMA_NONE)
		BUG();

	for (i = 0; i < nelems; i++, sg++) {
		sg->orig_address = SG_ENT_VIRT_ADDRESS(sg);
		if ((SG_ENT_PHYS_ADDRESS(sg) & ~hwdev->dma_mask) != 0) {
			addr = map_single(hwdev, sg->orig_address, sg->length, direction);
			sg->page = virt_to_page(addr);
			sg->offset = (u64) addr & ~PAGE_MASK;
		}
		addr = SG_ENT_VIRT_ADDRESS(sg);
		pci_addr = virt_to_phys(addr);
		if ((pci_addr & ~hwdev->dma_mask) != 0)
			sg->dma_address = map_single(hwdev, addr, sg->length, direction);
		else
			sg->dma_address = pci_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
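The per-entry test above reduces to a mask check: if the buffer's physical address has any bit set above the device's DMA mask, the entry must be bounced through the swiotlb aperture; otherwise the physical address can be handed to the device directly. A minimal hedged sketch (needs_bounce() is an illustrative name, not a kernel symbol):

#include <stdint.h>

/* Returns nonzero when phys_addr is not reachable under the device's dma_mask. */
static int
needs_bounce (uint64_t phys_addr, uint64_t dma_mask)
{
	return (phys_addr & ~dma_mask) != 0;
}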
...
...
@@ -444,12 +446,10 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg)) {
			unmap_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction);
			sg->page = virt_to_page(sg->orig_address);
			sg->offset = (u64) sg->orig_address & ~PAGE_MASK;
		} else if (direction == PCI_DMA_FROMDEVICE)
			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->length);
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, sg->dma_address, sg->dma_length, direction);
		else if (direction == PCI_DMA_FROMDEVICE)
			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
/*
...
...
@@ -468,14 +468,14 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
		BUG();

	for (i = 0; i < nelems; i++, sg++)
		if (sg->orig_address != SG_ENT_VIRT_ADDRESS(sg))
			sync_single(hwdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, direction);
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, sg->dma_address, sg->dma_length, direction);
}

unsigned long
swiotlb_dma_address (struct scatterlist *sg)
{
	return SG_ENT_PHYS_ADDRESS(sg);
	return sg->dma_address;
}
/*
...
...
arch/ia64/mm/init.c
View file @
8beb1642
...
...
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
...
...
@@ -68,10 +69,9 @@ ia64_init_addr_space (void)
	struct vm_area_struct *vma;

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL,
	 * we simply ignore the problem. When the process attempts to
	 * write to the register backing store for the first time, it
	 * will get a SEGFAULT in this case.
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem. When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
...
...
@@ -86,6 +86,19 @@ ia64_init_addr_space (void)
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}
void
...
...
arch/ia64/mm/tlb.c
View file @
8beb1642
...
...
@@ -35,10 +35,10 @@
1 << _PAGE_SIZE_4K )
struct ia64_ctx ia64_ctx = {
	lock:		SPIN_LOCK_UNLOCKED,
	next:		1,
	limit:		(1 << 15) - 1,	/* start out with the safe (architected) limit */
	max_ctx:	~0U
	.lock =		SPIN_LOCK_UNLOCKED,
	.next =		1,
	.limit =	(1 << 15) - 1,	/* start out with the safe (architected) limit */
	.max_ctx =	~0U
};
/*
...
...
@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm)
{
	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
	struct task_struct *tsk;
	int i;

	if (ia64_ctx.next > max_ctx)
		ia64_ctx.next = 300;	/* skip daemons */
...
...
@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm)
			ia64_ctx.limit = tsk_context;
	}
	read_unlock(&tasklist_lock);
	flush_tlb_all();
	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
	for (i = 0; i < smp_num_cpus; ++i)
		if (i != smp_processor_id())
			per_cpu(ia64_need_tlb_flush, i) = 1;
	__flush_tlb_all();
}
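The replacement avoids the global flush_tlb_all() in wrap_mmu_context(): the wrapping CPU flushes only itself and raises a per-CPU flag, and each remaining CPU flushes lazily the next time it activates an mm (see delayed_tlb_flush() in mmu_context.h further down). A hedged, stand-alone sketch of that pattern with hypothetical names:

#define MAX_CPUS 64

static volatile int need_tlb_flush[MAX_CPUS];

static void local_tlb_flush(void) { /* platform-specific; stub for illustration */ }

static void
wrap_contexts (int this_cpu, int num_cpus)
{
	int i;

	for (i = 0; i < num_cpus; i++)
		if (i != this_cpu)
			need_tlb_flush[i] = 1;	/* ask the other CPUs to flush lazily */
	local_tlb_flush();			/* flush only the CPU that wrapped */
}

static void
activate_address_space (int this_cpu)
{
	if (need_tlb_flush[this_cpu]) {		/* deferred flush, as in delayed_tlb_flush() */
		local_tlb_flush();
		need_tlb_flush[this_cpu] = 0;
	}
}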
void
...
...
arch/ia64/sn/io/ifconfig_net.c
View file @
8beb1642
...
...
@@ -279,9 +279,9 @@ static int ifconfig_net_ioctl(struct inode * inode, struct file * file,
}
struct file_operations ifconfig_net_fops = {
	ioctl:		ifconfig_net_ioctl,	/* ioctl */
	open:		ifconfig_net_open,	/* open */
	release:	ifconfig_net_close	/* release */
	.ioctl =	ifconfig_net_ioctl,	/* ioctl */
	.open =		ifconfig_net_open,	/* open */
	.release =	ifconfig_net_close	/* release */
};
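This and the following hunks convert GNU-style "field:" struct initializers to the C99 designated-initializer form. Both spellings initialize the named members and leave the rest zeroed; only the ".field =" form is standard C. A small stand-alone example (my_ops and the dummy functions are illustrative only):

struct ops { int (*open)(void); int (*release)(void); };

static int my_open(void)    { return 0; }
static int my_release(void) { return 0; }

/* Old GNU extension, accepted by gcc 2.x/3.x: */
static struct ops legacy_ops = { open: my_open, release: my_release };

/* Standard C99 designated initializers, the preferred form: */
static struct ops modern_ops = { .open = my_open, .release = my_release };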
...
...
arch/ia64/sn/io/pciba.c
View file @
8beb1642
...
...
@@ -210,31 +210,31 @@ static void dump_allocations(struct list_head * dalp);
/* file operations for each type of node */
static struct file_operations rom_fops = {
	owner:		THIS_MODULE,
	mmap:		rom_mmap,
	open:		generic_open,
	release:	rom_release
	.owner =	THIS_MODULE,
	.mmap =		rom_mmap,
	.open =		generic_open,
	.release =	rom_release
};

static struct file_operations base_fops = {
	owner:		THIS_MODULE,
	mmap:		base_mmap,
	open:		generic_open
	.owner =	THIS_MODULE,
	.mmap =		base_mmap,
	.open =		generic_open
};

static struct file_operations config_fops = {
	owner:		THIS_MODULE,
	ioctl:		config_ioctl,
	open:		generic_open
	.owner =	THIS_MODULE,
	.ioctl =	config_ioctl,
	.open =		generic_open
};

static struct file_operations dma_fops = {
	owner:		THIS_MODULE,
	ioctl:		dma_ioctl,
	mmap:		dma_mmap,
	open:		generic_open
	.owner =	THIS_MODULE,
	.ioctl =	dma_ioctl,
	.mmap =		dma_mmap,
	.open =		generic_open
};
...
...
arch/ia64/sn/io/sn1/hubcounters.c
View file @
8beb1642
...
...
@@ -24,7 +24,7 @@ extern void hubni_error_handler(char *, int); /* huberror.c */
static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
struct file_operations hub_mon_fops = {
	ioctl:		hubstats_ioctl,
	.ioctl =	hubstats_ioctl,
};
#define HUB_CAPTURE_TICKS (2 * HZ)
...
...
arch/ia64/sn/io/sn1/pcibr.c
View file @
8beb1642
...
...
@@ -307,22 +307,22 @@ extern void free_pciio_dmamap(pcibr_dmamap_t);
* appropriate function name below.
*/
struct file_operations pcibr_fops = {
	owner:		THIS_MODULE,
	llseek:		NULL,
	read:		NULL,
	write:		NULL,
	readdir:	NULL,
	poll:		NULL,
	ioctl:		NULL,
	mmap:		NULL,
	open:		NULL,
	flush:		NULL,
	release:	NULL,
	fsync:		NULL,
	fasync:		NULL,
	lock:		NULL,
	readv:		NULL,
	writev:		NULL
	.owner =	THIS_MODULE,
	.llseek =	NULL,
	.read =		NULL,
	.write =	NULL,
	.readdir =	NULL,
	.poll =		NULL,
	.ioctl =	NULL,
	.mmap =		NULL,
	.open =		NULL,
	.flush =	NULL,
	.release =	NULL,
	.fsync =	NULL,
	.fasync =	NULL,
	.lock =		NULL,
	.readv =	NULL,
	.writev =	NULL
};

extern devfs_handle_t hwgraph_root;
...
...
arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
View file @
8beb1642
...
...
@@ -64,22 +64,22 @@ int pcibr_devflag = D_MP;
* appropriate function name below.
*/
struct file_operations pcibr_fops = {
	owner:		THIS_MODULE,
	llseek:		NULL,
	read:		NULL,
	write:		NULL,
	readdir:	NULL,
	poll:		NULL,
	ioctl:		NULL,
	mmap:		NULL,
	open:		NULL,
	flush:		NULL,
	release:	NULL,
	fsync:		NULL,
	fasync:		NULL,
	lock:		NULL,
	readv:		NULL,
	writev:		NULL
	.owner =	THIS_MODULE,
	.llseek =	NULL,
	.read =		NULL,
	.write =	NULL,
	.readdir =	NULL,
	.poll =		NULL,
	.ioctl =	NULL,
	.mmap =		NULL,
	.open =		NULL,
	.flush =	NULL,
	.release =	NULL,
	.fsync =	NULL,
	.fasync =	NULL,
	.lock =		NULL,
	.readv =	NULL,
	.writev =	NULL
};
#ifdef LATER
...
...
arch/ia64/sn/kernel/setup.c
View file @
8beb1642
...
...
@@ -109,14 +109,14 @@ irqpda_t *irqpdaindr[NR_CPUS];
* VGA color display.
*/
struct screen_info sn1_screen_info = {
	orig_x:			0,
	orig_y:			0,
	orig_video_mode:	3,
	orig_video_cols:	80,
	orig_video_ega_bx:	3,
	orig_video_lines:	25,
	orig_video_isVGA:	1,
	orig_video_points:	16
	.orig_x =		0,
	.orig_y =		0,
	.orig_video_mode =	3,
	.orig_video_cols =	80,
	.orig_video_ega_bx =	3,
	.orig_video_lines =	25,
	.orig_video_isVGA =	1,
	.orig_video_points =	16
};
/*
...
...
@@ -170,9 +170,9 @@ early_sn1_setup(void)
#ifdef NOT_YET_CONFIG_IA64_MCA
extern void ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs);
static struct irqaction mca_cpe_irqaction = {
	handler:	ia64_mca_cpe_int_handler,
	flags:		SA_INTERRUPT,
	name:		"cpe_hndlr"
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_hndlr"
};
#endif
#ifdef CONFIG_IA64_MCA
...
...
include/asm-ia64/bitops.h
View file @
8beb1642
...
...
@@ -326,7 +326,7 @@ ia64_fls (unsigned long x)
	return exp - 0xffff;
}

static int
static inline int
fls (int x)
{
	return ia64_fls((unsigned int) x);
...
...
include/asm-ia64/delay.h
View file @
8beb1642
...
...
@@ -53,7 +53,7 @@ ia64_get_itc (void)
	__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#ifdef CONFIG_ITANIUM
	while (unlikely((__s32) result == -1)
	while (unlikely((__s32) result == -1))
		__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#endif
	return result;
...
...
include/asm-ia64/keyboard.h
View file @
8beb1642
...
...
@@ -16,6 +16,7 @@
#define KEYBOARD_IRQ isa_irq_to_vector(1)
#define DISABLE_KBD_DURING_INTERRUPTS 0
extern unsigned char acpi_kbd_controller_present;
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode);
extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
...
@@ -26,6 +27,7 @@ extern void pckbd_leds(unsigned char leds);
extern void pckbd_init_hw(void);
extern unsigned char pckbd_sysrq_xlate[128];
#define kbd_controller_present() acpi_kbd_controller_present
#define kbd_setkeycode pckbd_setkeycode
#define kbd_getkeycode pckbd_getkeycode
#define kbd_pretranslate pckbd_pretranslate
...
...
include/asm-ia64/kregs.h
View file @
8beb1642
...
...
@@ -64,6 +64,15 @@
#define IA64_PSR_RI_BIT 41
#define IA64_PSR_ED_BIT 43
#define IA64_PSR_BN_BIT 44
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
execve(). */
#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH)
#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
...
...
include/asm-ia64/machvec.h
View file @
8beb1642
...
...
@@ -210,6 +210,7 @@ extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
extern ia64_mv_pci_dma_address swiotlb_dma_address;
extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
/*
* Define default versions so we can extend machvec for new platforms without having
...
...
include/asm-ia64/machvec_init.h
View file @
8beb1642
...
...
@@ -16,6 +16,7 @@ extern ia64_mv_inl_t __ia64_inl;
extern ia64_mv_outb_t __ia64_outb;
extern ia64_mv_outw_t __ia64_outw;
extern ia64_mv_outl_t __ia64_outl;
extern ia64_mv_mmiob_t __ia64_mmiob;
#define MACHVEC_HELPER(name) \
struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
...
...
include/asm-ia64/mmu_context.h
View file @
8beb1642
...
...
@@ -2,8 +2,8 @@
#define _ASM_IA64_MMU_CONTEXT_H
/*
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
...
...
@@ -13,8 +13,6 @@
* consider the region number when performing a TLB lookup, we need to assign a unique
* region id to each region in a process. We use the least significant three bits in a
* region id for this purpose.
*
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define IA64_REGION_ID_KERNEL 0
/* the kernel's region id (tlb.c depends on this being 0) */
...
...
@@ -23,6 +21,8 @@
# ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
...
...
@@ -36,6 +36,7 @@ struct ia64_ctx {
};
extern struct ia64_ctx ia64_ctx;
extern u8 ia64_need_tlb_flush __per_cpu_data;

extern void wrap_mmu_context (struct mm_struct *mm);
...
...
@@ -44,6 +45,23 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
/*
* When the context counter wraps around all TLBs need to be flushed because an old
* context number might have been reused. This is signalled by the ia64_need_tlb_flush
* per-CPU variable, which is checked in the routine below. Called by activate_mm().
* <efocht@ess.nec.de>
*/
static inline void
delayed_tlb_flush (void)
{
	extern void __flush_tlb_all (void);

	if (unlikely(ia64_need_tlb_flush)) {
		__flush_tlb_all();
		ia64_need_tlb_flush = 0;
	}
}

static inline void
get_new_mmu_context (struct mm_struct *mm)
{
...
...
@@ -54,7 +72,6 @@ get_new_mmu_context (struct mm_struct *mm)
		mm->context = ia64_ctx.next++;
	}
	spin_unlock(&ia64_ctx.lock);
}

static inline void
...
...
@@ -109,6 +126,8 @@ reload_context (struct mm_struct *mm)
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	delayed_tlb_flush();

/*
* We may get interrupts here, but that's OK because interrupt
* handlers cannot touch user-space.
...
...
include/asm-ia64/offsets.h
deleted
100644 → 0
View file @
12ebbff8
#ifndef _ASM_IA64_OFFSETS_H
#define _ASM_IA64_OFFSETS_H
/*
* DO NOT MODIFY
*
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define IA64_TASK_SIZE 3952
/* 0xf70 */
#define IA64_THREAD_INFO_SIZE 32
/* 0x20 */
#define IA64_PT_REGS_SIZE 400
/* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560
/* 0x230 */
#define IA64_SIGINFO_SIZE 128
/* 0x80 */
#define IA64_CPU_SIZE 224
/* 0xe0 */
#define SIGFRAME_SIZE 2816
/* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448
/* 0x1c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1496
/* 0x5d8 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0
/* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8
/* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16
/* 0x10 */
#define IA64_PT_REGS_AR_UNAT_OFFSET 24
/* 0x18 */
#define IA64_PT_REGS_AR_PFS_OFFSET 32
/* 0x20 */
#define IA64_PT_REGS_AR_RSC_OFFSET 40
/* 0x28 */
#define IA64_PT_REGS_AR_RNAT_OFFSET 48
/* 0x30 */
#define IA64_PT_REGS_AR_BSPSTORE_OFFSET 56
/* 0x38 */
#define IA64_PT_REGS_PR_OFFSET 64
/* 0x40 */
#define IA64_PT_REGS_B6_OFFSET 72
/* 0x48 */
#define IA64_PT_REGS_LOADRS_OFFSET 80
/* 0x50 */
#define IA64_PT_REGS_R1_OFFSET 88
/* 0x58 */
#define IA64_PT_REGS_R2_OFFSET 96
/* 0x60 */
#define IA64_PT_REGS_R3_OFFSET 104
/* 0x68 */
#define IA64_PT_REGS_R12_OFFSET 112
/* 0x70 */
#define IA64_PT_REGS_R13_OFFSET 120
/* 0x78 */
#define IA64_PT_REGS_R14_OFFSET 128
/* 0x80 */
#define IA64_PT_REGS_R15_OFFSET 136
/* 0x88 */
#define IA64_PT_REGS_R8_OFFSET 144
/* 0x90 */
#define IA64_PT_REGS_R9_OFFSET 152
/* 0x98 */
#define IA64_PT_REGS_R10_OFFSET 160
/* 0xa0 */
#define IA64_PT_REGS_R11_OFFSET 168
/* 0xa8 */
#define IA64_PT_REGS_R16_OFFSET 176
/* 0xb0 */
#define IA64_PT_REGS_R17_OFFSET 184
/* 0xb8 */
#define IA64_PT_REGS_R18_OFFSET 192
/* 0xc0 */
#define IA64_PT_REGS_R19_OFFSET 200
/* 0xc8 */
#define IA64_PT_REGS_R20_OFFSET 208
/* 0xd0 */
#define IA64_PT_REGS_R21_OFFSET 216
/* 0xd8 */
#define IA64_PT_REGS_R22_OFFSET 224
/* 0xe0 */
#define IA64_PT_REGS_R23_OFFSET 232
/* 0xe8 */
#define IA64_PT_REGS_R24_OFFSET 240
/* 0xf0 */
#define IA64_PT_REGS_R25_OFFSET 248
/* 0xf8 */
#define IA64_PT_REGS_R26_OFFSET 256
/* 0x100 */
#define IA64_PT_REGS_R27_OFFSET 264
/* 0x108 */
#define IA64_PT_REGS_R28_OFFSET 272
/* 0x110 */
#define IA64_PT_REGS_R29_OFFSET 280
/* 0x118 */
#define IA64_PT_REGS_R30_OFFSET 288
/* 0x120 */
#define IA64_PT_REGS_R31_OFFSET 296
/* 0x128 */
#define IA64_PT_REGS_AR_CCV_OFFSET 304
/* 0x130 */
#define IA64_PT_REGS_AR_FPSR_OFFSET 312
/* 0x138 */
#define IA64_PT_REGS_B0_OFFSET 320
/* 0x140 */
#define IA64_PT_REGS_B7_OFFSET 328
/* 0x148 */
#define IA64_PT_REGS_F6_OFFSET 336
/* 0x150 */
#define IA64_PT_REGS_F7_OFFSET 352
/* 0x160 */
#define IA64_PT_REGS_F8_OFFSET 368
/* 0x170 */
#define IA64_PT_REGS_F9_OFFSET 384
/* 0x180 */
#define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0
/* 0x0 */
#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8
/* 0x8 */
#define IA64_SWITCH_STACK_F2_OFFSET 16
/* 0x10 */
#define IA64_SWITCH_STACK_F3_OFFSET 32
/* 0x20 */
#define IA64_SWITCH_STACK_F4_OFFSET 48
/* 0x30 */
#define IA64_SWITCH_STACK_F5_OFFSET 64
/* 0x40 */
#define IA64_SWITCH_STACK_F10_OFFSET 80
/* 0x50 */
#define IA64_SWITCH_STACK_F11_OFFSET 96
/* 0x60 */
#define IA64_SWITCH_STACK_F12_OFFSET 112
/* 0x70 */
#define IA64_SWITCH_STACK_F13_OFFSET 128
/* 0x80 */
#define IA64_SWITCH_STACK_F14_OFFSET 144
/* 0x90 */
#define IA64_SWITCH_STACK_F15_OFFSET 160
/* 0xa0 */
#define IA64_SWITCH_STACK_F16_OFFSET 176
/* 0xb0 */
#define IA64_SWITCH_STACK_F17_OFFSET 192
/* 0xc0 */
#define IA64_SWITCH_STACK_F18_OFFSET 208
/* 0xd0 */
#define IA64_SWITCH_STACK_F19_OFFSET 224
/* 0xe0 */
#define IA64_SWITCH_STACK_F20_OFFSET 240
/* 0xf0 */
#define IA64_SWITCH_STACK_F21_OFFSET 256
/* 0x100 */
#define IA64_SWITCH_STACK_F22_OFFSET 272
/* 0x110 */
#define IA64_SWITCH_STACK_F23_OFFSET 288
/* 0x120 */
#define IA64_SWITCH_STACK_F24_OFFSET 304
/* 0x130 */
#define IA64_SWITCH_STACK_F25_OFFSET 320
/* 0x140 */
#define IA64_SWITCH_STACK_F26_OFFSET 336
/* 0x150 */
#define IA64_SWITCH_STACK_F27_OFFSET 352
/* 0x160 */
#define IA64_SWITCH_STACK_F28_OFFSET 368
/* 0x170 */
#define IA64_SWITCH_STACK_F29_OFFSET 384
/* 0x180 */
#define IA64_SWITCH_STACK_F30_OFFSET 400
/* 0x190 */
#define IA64_SWITCH_STACK_F31_OFFSET 416
/* 0x1a0 */
#define IA64_SWITCH_STACK_R4_OFFSET 432
/* 0x1b0 */
#define IA64_SWITCH_STACK_R5_OFFSET 440
/* 0x1b8 */
#define IA64_SWITCH_STACK_R6_OFFSET 448
/* 0x1c0 */
#define IA64_SWITCH_STACK_R7_OFFSET 456
/* 0x1c8 */
#define IA64_SWITCH_STACK_B0_OFFSET 464
/* 0x1d0 */
#define IA64_SWITCH_STACK_B1_OFFSET 472
/* 0x1d8 */
#define IA64_SWITCH_STACK_B2_OFFSET 480
/* 0x1e0 */
#define IA64_SWITCH_STACK_B3_OFFSET 488
/* 0x1e8 */
#define IA64_SWITCH_STACK_B4_OFFSET 496
/* 0x1f0 */
#define IA64_SWITCH_STACK_B5_OFFSET 504
/* 0x1f8 */
#define IA64_SWITCH_STACK_AR_PFS_OFFSET 512
/* 0x200 */
#define IA64_SWITCH_STACK_AR_LC_OFFSET 520
/* 0x208 */
#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528
/* 0x210 */
#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536
/* 0x218 */
#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544
/* 0x220 */
#define IA64_SWITCH_STACK_PR_OFFSET 552
/* 0x228 */
#define IA64_SIGCONTEXT_IP_OFFSET 40
/* 0x28 */
#define IA64_SIGCONTEXT_AR_BSP_OFFSET 72
/* 0x48 */
#define IA64_SIGCONTEXT_AR_FPSR_OFFSET 104
/* 0x68 */
#define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80
/* 0x50 */
#define IA64_SIGCONTEXT_AR_UNAT_OFFSET 96
/* 0x60 */
#define IA64_SIGCONTEXT_B0_OFFSET 136
/* 0x88 */
#define IA64_SIGCONTEXT_CFM_OFFSET 48
/* 0x30 */
#define IA64_SIGCONTEXT_FLAGS_OFFSET 0
/* 0x0 */
#define IA64_SIGCONTEXT_FR6_OFFSET 560
/* 0x230 */
#define IA64_SIGCONTEXT_PR_OFFSET 128
/* 0x80 */
#define IA64_SIGCONTEXT_R12_OFFSET 296
/* 0x128 */
#define IA64_SIGCONTEXT_RBS_BASE_OFFSET 2512
/* 0x9d0 */
#define IA64_SIGCONTEXT_LOADRS_OFFSET 2520
/* 0x9d8 */
#define IA64_SIGFRAME_ARG0_OFFSET 0
/* 0x0 */
#define IA64_SIGFRAME_ARG1_OFFSET 8
/* 0x8 */
#define IA64_SIGFRAME_ARG2_OFFSET 16
/* 0x10 */
#define IA64_SIGFRAME_HANDLER_OFFSET 24
/* 0x18 */
#define IA64_SIGFRAME_SIGCONTEXT_OFFSET 160
/* 0xa0 */
#define IA64_CLONE_VFORK 16384
/* 0x4000 */
#define IA64_CLONE_VM 256
/* 0x100 */
#endif
/* _ASM_IA64_OFFSETS_H */
include/asm-ia64/pci.h
View file @
8beb1642
...
...
@@ -90,7 +90,7 @@ pcibios_penalize_isa_irq (int irq)
/* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV) (0)
#define sg_dma_len(sg)		((sg)->length)
#define sg_dma_len(sg)		((sg)->dma_length)

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
...
...
include/asm-ia64/perfmon.h
View file @
8beb1642
...
...
@@ -172,9 +172,8 @@ extern int pfm_use_debug_registers(struct task_struct *);
extern int pfm_release_debug_registers(struct task_struct *);
extern int pfm_cleanup_smpl_buf(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, int);
extern void pfm_ovfl_block_reset(void);
extern int pfm_syst_wide;
extern void pfm_ovfl_block_reset(void);
extern void perfmon_init_percpu(void);

#endif /* __KERNEL__ */
...
...
include/asm-ia64/processor.h
View file @
8beb1642
...
...
@@ -270,12 +270,8 @@ struct thread_struct {
#define start_thread(regs,new_ip,new_sp) do { \
set_fs(USER_DS); \
	ia64_psr(regs)->dfh = 1;	/* disable fph */				\
	ia64_psr(regs)->mfh = 0;	/* clear mfh */					\
	ia64_psr(regs)->cpl = 3;	/* set user mode */				\
	ia64_psr(regs)->ri = 0;		/* clear return slot number */			\
	ia64_psr(regs)->is = 0;		/* IA-64 instruction set */			\
	ia64_psr(regs)->sp = 1;		/* enforce secure perfmon */			\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP)) \
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
	regs->cr_iip = new_ip; \
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */		\
regs->ar_rnat = 0; \
...
...
@@ -284,7 +280,7 @@ struct thread_struct {
regs->loadrs = 0; \
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */	\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */		\
	if (!likely (current->mm->dumpable)) { \
	if (unlikely(!current->mm->dumpable)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
...
...
include/asm-ia64/scatterlist.h
View file @
8beb1642
...
...
@@ -7,12 +7,12 @@
*/
struct scatterlist {
	char *orig_address;	/* for use by swiotlb */

	/* These two are only valid if ADDRESS member of this struct is NULL. */
	struct page *page;
	unsigned int offset;
	unsigned int length;	/* buffer length */

	dma_addr_t dma_address;
	unsigned int dma_length;
};
#define ISA_DMA_THRESHOLD (~0UL)
...
...
include/asm-ia64/softirq.h
View file @
8beb1642
...
...
@@ -8,6 +8,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/hardirq.h>
#include <linux/compiler.h>
#define __local_bh_enable() do { barrier(); really_local_bh_count()--; } while (0)
...
...
include/asm-ia64/suspend.h
0 → 100644
View file @
8beb1642
include/asm-ia64/system.h
View file @
8beb1642
...
...
@@ -13,6 +13,7 @@
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#include <linux/config.h>
#include <linux/percpu.h>
#include <asm/kregs.h>
#include <asm/page.h>
...
...
@@ -384,7 +385,8 @@ extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#if defined(CONFIG_SMP) && defined(CONFIG_PERFMON)
# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0)
  extern int __per_cpu_data pfm_syst_wide;
# define PERFMON_IS_SYSWIDE() (this_cpu(pfm_syst_wide) != 0)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
...
...
include/asm-ia64/tlb.h
View file @
8beb1642
/* XXX fix me! */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* This file was derived from asm-generic/tlb.h.
*/
/*
* Removing a translation from a page table (including TLB-shootdown) is a four-step
* procedure:
*
* (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
* (this is a no-op on ia64).
* (2) Clear the relevant portions of the page-table
* (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
* (4) Release the pages that were freed up in step (2).
*
* Note that the ordering of these steps is crucial to avoid races on MP machines.
*
* The Linux kernel defines several platform-specific hooks for TLB-shootdown. When
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
* tlb <- tlb_gather_mmu(mm); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
* for each page-table-entry PTE that needs to be removed do {
* tlb_remove_tlb_entry(tlb, pte, address);
* if (pte refers to a normal page) {
* tlb_remove_page(tlb, page);
* }
* }
* tlb_end_vma(tlb, vma);
* }
* }
* tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
*/
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_SMP
# define FREE_PTE_NR 2048
# define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
#else
# define FREE_PTE_NR 0
# define tlb_fast_mode(tlb) (1)
#endif
typedef struct {
	struct mm_struct	*mm;
	unsigned long		nr;		/* == ~0UL => fast mode */
	unsigned long		freed;		/* number of pages freed */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
} mmu_gather_t;

/* Users of the generic TLB shootdown code must declare this storage space. */
extern mmu_gather_t	mmu_gathers[NR_CPUS];
/*
* Flush the TLB for address range START to END and, if not in fast mode, release the
* freed pages that where gathered up to this point.
*/
static inline void
ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	unsigned long nr;

	if (unlikely(end - start >= 1024*1024*1024*1024UL
		     || rgn_index(start) != rgn_index(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}
/*
* Return a pointer to an initialized mmu_gather_t.
*/
static inline mmu_gather_t *
tlb_gather_mmu (struct mm_struct *mm)
{
	mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];

	tlb->mm = mm;
	tlb->freed = 0;
	tlb->start_addr = ~0UL;

	/* Use fast mode if only one CPU is online */
	tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
	return tlb;
}
/*
* Called at the end of the shootdown operation to free up any resources that were
* collected. The page table lock is still held at this point.
*/
static inline void
tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	unsigned long freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	unsigned long rss = mm->rss;

	if (rss < freed)
		freed = rss;
	mm->rss = rss - freed;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
/*
* Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t pte, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
/*
* Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
* must be delayed until after the TLB has been flushed (see comments at the beginning of
* this file).
*/
static inline void
tlb_remove_page (mmu_gather_t *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>

#endif /* _ASM_IA64_TLB_H */
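For reference, the gather/flush interface described by the template comment at the top of this file is driven roughly as in the sketch below; this is a hedged illustration of the calling sequence only (unmap_region() and the PTE walk are placeholders, not kernel functions):

static void
unmap_region (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	mmu_gather_t *tlb = tlb_gather_mmu(mm);		/* begin the shootdown */
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		tlb_start_vma(tlb, vma);
		/* for each PTE in [start, end):
		 *	tlb_remove_tlb_entry(tlb, pte, address);
		 *	tlb_remove_page(tlb, page);	// only for normal pages
		 */
		tlb_end_vma(tlb, vma);
	}
	tlb_finish_mmu(tlb, start, end);		/* flush TLBs, then free the pages */
}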
include/asm-ia64/tlbflush.h
View file @
8beb1642
...
...
@@ -70,12 +70,10 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct vm_area_struct vma;

	if (REGION_NUMBER(start) != REGION_NUMBER(end))
		printk("flush_tlb_pgtables: can't flush across regions!!\n");
	vma.vm_mm = mm;
	flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
/* XXX fix me */
...
...
include/asm-ia64/unistd.h
View file @
8beb1642
...
...
@@ -223,6 +223,10 @@
#define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232
#define __NR_security 1233
#define __NR_get_large_pages 1234
#define __NR_free_large_pages 1235
#define __NR_share_large_pages 1236
#define __NR_unshare_large_pages 1237
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
...
...