Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
17647b1d
Commit
17647b1d
authored
Sep 23, 2004
by
Richard Russon
Browse files
Options
Browse Files
Download
Plain Diff
Merge flatcap.org:/home/flatcap/backup/bk/ntfs-2.6
into flatcap.org:/home/flatcap/backup/bk/ntfs-2.6-devel
parents
d3762ba0
59f9f96e
Changes
55
Hide whitespace changes
Inline
Side-by-side
Showing
55 changed files
with
1765 additions
and
2464 deletions
+1765
-2464
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/alpha_ksyms.c
+0
-30
arch/alpha/kernel/core_cia.c
arch/alpha/kernel/core_cia.c
+2
-2
arch/alpha/kernel/core_irongate.c
arch/alpha/kernel/core_irongate.c
+11
-9
arch/alpha/kernel/core_marvel.c
arch/alpha/kernel/core_marvel.c
+141
-86
arch/alpha/kernel/core_titan.c
arch/alpha/kernel/core_titan.c
+32
-18
arch/alpha/kernel/err_titan.c
arch/alpha/kernel/err_titan.c
+1
-1
arch/alpha/kernel/machvec_impl.h
arch/alpha/kernel/machvec_impl.h
+45
-56
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/osf_sys.c
+36
-35
arch/alpha/kernel/pci-noop.c
arch/alpha/kernel/pci-noop.c
+12
-0
arch/alpha/kernel/pci.c
arch/alpha/kernel/pci.c
+34
-0
arch/alpha/kernel/srmcons.c
arch/alpha/kernel/srmcons.c
+1
-1
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_alcor.c
+2
-4
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_cabriolet.c
+0
-5
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_dp264.c
+0
-5
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eb64p.c
+0
-2
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_eiger.c
+0
-1
arch/alpha/kernel/sys_jensen.c
arch/alpha/kernel/sys_jensen.c
+0
-3
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_marvel.c
+0
-1
arch/alpha/kernel/sys_miata.c
arch/alpha/kernel/sys_miata.c
+0
-1
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_mikasa.c
+0
-2
arch/alpha/kernel/sys_nautilus.c
arch/alpha/kernel/sys_nautilus.c
+0
-1
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_noritake.c
+0
-2
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_rawhide.c
+0
-1
arch/alpha/kernel/sys_ruffian.c
arch/alpha/kernel/sys_ruffian.c
+0
-1
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_rx164.c
+0
-1
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_sable.c
+0
-3
arch/alpha/kernel/sys_sio.c
arch/alpha/kernel/sys_sio.c
+0
-5
arch/alpha/kernel/sys_sx164.c
arch/alpha/kernel/sys_sx164.c
+0
-1
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_takara.c
+0
-1
arch/alpha/kernel/sys_titan.c
arch/alpha/kernel/sys_titan.c
+5
-7
arch/alpha/kernel/sys_wildfire.c
arch/alpha/kernel/sys_wildfire.c
+0
-1
arch/alpha/kernel/traps.c
arch/alpha/kernel/traps.c
+1
-1
arch/alpha/lib/io.c
arch/alpha/lib/io.c
+283
-246
arch/alpha/mm/fault.c
arch/alpha/mm/fault.c
+1
-1
include/asm-alpha/compiler.h
include/asm-alpha/compiler.h
+10
-0
include/asm-alpha/core_apecs.h
include/asm-alpha/core_apecs.h
+94
-130
include/asm-alpha/core_cia.h
include/asm-alpha/core_cia.h
+91
-212
include/asm-alpha/core_irongate.h
include/asm-alpha/core_irongate.h
+17
-117
include/asm-alpha/core_lca.h
include/asm-alpha/core_lca.h
+91
-132
include/asm-alpha/core_marvel.h
include/asm-alpha/core_marvel.h
+21
-222
include/asm-alpha/core_mcpcia.h
include/asm-alpha/core_mcpcia.h
+82
-185
include/asm-alpha/core_polaris.h
include/asm-alpha/core_polaris.h
+17
-129
include/asm-alpha/core_t2.h
include/asm-alpha/core_t2.h
+67
-43
include/asm-alpha/core_titan.h
include/asm-alpha/core_titan.h
+15
-131
include/asm-alpha/core_tsunami.h
include/asm-alpha/core_tsunami.h
+16
-133
include/asm-alpha/core_wildfire.h
include/asm-alpha/core_wildfire.h
+16
-133
include/asm-alpha/io.h
include/asm-alpha/io.h
+399
-287
include/asm-alpha/io_trivial.h
include/asm-alpha/io_trivial.h
+127
-0
include/asm-alpha/jensen.h
include/asm-alpha/jensen.h
+57
-42
include/asm-alpha/machvec.h
include/asm-alpha/machvec.h
+22
-20
include/asm-alpha/mmu_context.h
include/asm-alpha/mmu_context.h
+1
-0
include/asm-alpha/spinlock.h
include/asm-alpha/spinlock.h
+2
-2
include/asm-alpha/system.h
include/asm-alpha/system.h
+3
-3
include/asm-alpha/tlbflush.h
include/asm-alpha/tlbflush.h
+1
-0
include/asm-alpha/vga.h
include/asm-alpha/vga.h
+9
-9
No files found.
arch/alpha/kernel/alpha_ksyms.c
View file @
17647b1d
...
...
@@ -68,36 +68,6 @@ EXPORT_SYMBOL(alpha_using_srm);
#endif
/* CONFIG_ALPHA_GENERIC */
/* platform dependent support */
EXPORT_SYMBOL
(
_inb
);
EXPORT_SYMBOL
(
_inw
);
EXPORT_SYMBOL
(
_inl
);
EXPORT_SYMBOL
(
_outb
);
EXPORT_SYMBOL
(
_outw
);
EXPORT_SYMBOL
(
_outl
);
EXPORT_SYMBOL
(
_readb
);
EXPORT_SYMBOL
(
_readw
);
EXPORT_SYMBOL
(
_readl
);
EXPORT_SYMBOL
(
_writeb
);
EXPORT_SYMBOL
(
_writew
);
EXPORT_SYMBOL
(
_writel
);
EXPORT_SYMBOL
(
___raw_readb
);
EXPORT_SYMBOL
(
___raw_readw
);
EXPORT_SYMBOL
(
___raw_readl
);
EXPORT_SYMBOL
(
___raw_readq
);
EXPORT_SYMBOL
(
___raw_writeb
);
EXPORT_SYMBOL
(
___raw_writew
);
EXPORT_SYMBOL
(
___raw_writel
);
EXPORT_SYMBOL
(
___raw_writeq
);
EXPORT_SYMBOL
(
_memcpy_fromio
);
EXPORT_SYMBOL
(
_memcpy_toio
);
EXPORT_SYMBOL
(
_memset_c_io
);
EXPORT_SYMBOL
(
scr_memcpyw
);
EXPORT_SYMBOL
(
insb
);
EXPORT_SYMBOL
(
insw
);
EXPORT_SYMBOL
(
insl
);
EXPORT_SYMBOL
(
outsb
);
EXPORT_SYMBOL
(
outsw
);
EXPORT_SYMBOL
(
outsl
);
EXPORT_SYMBOL
(
strcat
);
EXPORT_SYMBOL
(
strcmp
);
EXPORT_SYMBOL
(
strcpy
);
...
...
arch/alpha/kernel/core_cia.c
View file @
17647b1d
...
...
@@ -282,7 +282,7 @@ void
cia_pci_tbi_try2
(
struct
pci_controller
*
hose
,
dma_addr_t
start
,
dma_addr_t
end
)
{
unsigned
long
bus_addr
;
void
__iomem
*
bus_addr
;
int
ctrl
;
/* Put the chip into PCI loopback mode. */
...
...
@@ -351,7 +351,7 @@ verify_tb_operation(void)
struct
pci_iommu_arena
*
arena
=
pci_isa_hose
->
sg_isa
;
int
ctrl
,
addr0
,
tag0
,
pte0
,
data0
;
int
temp
,
use_tbia_try2
=
0
;
unsigned
long
bus_addr
;
void
__iomem
*
bus_addr
;
/* pyxis -- tbia is broken */
if
(
pci_isa_hose
->
dense_io_base
)
...
...
arch/alpha/kernel/core_irongate.c
View file @
17647b1d
...
...
@@ -310,7 +310,7 @@ irongate_init_arch(void)
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
unsigned
long
void
__iomem
*
irongate_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
struct
vm_struct
*
area
;
...
...
@@ -320,7 +320,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
unsigned
long
gart_bus_addr
;
if
(
!
alpha_agpgart_size
)
return
addr
+
IRONGATE_MEM
;
return
(
void
__iomem
*
)(
addr
+
IRONGATE_MEM
)
;
gart_bus_addr
=
(
unsigned
long
)
IRONGATE0
->
bar0
&
PCI_BASE_ADDRESS_MEM_MASK
;
...
...
@@ -339,7 +339,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
/*
* Not found - assume legacy ioremap
*/
return
addr
+
IRONGATE_MEM
;
return
(
void
__iomem
*
)(
addr
+
IRONGATE_MEM
)
;
}
while
(
0
);
mmio_regs
=
(
u32
*
)(((
unsigned
long
)
IRONGATE0
->
bar1
&
...
...
@@ -353,7 +353,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
if
(
addr
&
~
PAGE_MASK
)
{
printk
(
"AGP ioremap failed... addr not page aligned (0x%lx)
\n
"
,
addr
);
return
addr
+
IRONGATE_MEM
;
return
(
void
__iomem
*
)(
addr
+
IRONGATE_MEM
)
;
}
last
=
addr
+
size
-
1
;
size
=
PAGE_ALIGN
(
last
)
-
addr
;
...
...
@@ -378,7 +378,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
* Map it
*/
area
=
get_vm_area
(
size
,
VM_IOREMAP
);
if
(
!
area
)
return
(
unsigned
long
)
NULL
;
if
(
!
area
)
return
NULL
;
for
(
baddr
=
addr
,
vaddr
=
(
unsigned
long
)
area
->
addr
;
baddr
<=
last
;
...
...
@@ -391,7 +391,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
pte
,
PAGE_SIZE
,
0
))
{
printk
(
"AGP ioremap: FAILED to map...
\n
"
);
vfree
(
area
->
addr
);
return
(
unsigned
long
)
NULL
;
return
NULL
;
}
}
...
...
@@ -402,13 +402,15 @@ irongate_ioremap(unsigned long addr, unsigned long size)
printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
addr, size, vaddr);
#endif
return
vaddr
;
return
(
void
__iomem
*
)
vaddr
;
}
void
irongate_iounmap
(
unsigned
long
addr
)
irongate_iounmap
(
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(((
long
)
addr
>>
41
)
==
-
2
)
return
;
/* kseg map, nothing to do */
if
(
addr
)
return
vfree
((
void
*
)(
PAGE_MASK
&
addr
));
if
(
addr
)
return
vfree
((
void
*
)(
PAGE_MASK
&
addr
));
}
arch/alpha/kernel/core_marvel.c
View file @
17647b1d
...
...
@@ -610,11 +610,84 @@ marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
csrs
->
POx_SG_TBIA
.
csr
;
}
/*
* RTC Support
*/
struct
marvel_rtc_access_info
{
unsigned
long
function
;
unsigned
long
index
;
unsigned
long
data
;
};
static
void
__marvel_access_rtc
(
void
*
info
)
{
struct
marvel_rtc_access_info
*
rtc_access
=
info
;
register
unsigned
long
__r0
__asm__
(
"$0"
);
register
unsigned
long
__r16
__asm__
(
"$16"
)
=
rtc_access
->
function
;
register
unsigned
long
__r17
__asm__
(
"$17"
)
=
rtc_access
->
index
;
register
unsigned
long
__r18
__asm__
(
"$18"
)
=
rtc_access
->
data
;
__asm__
__volatile__
(
"call_pal %4 # cserve rtc"
:
"=r"
(
__r16
),
"=r"
(
__r17
),
"=r"
(
__r18
),
"=r"
(
__r0
)
:
"i"
(
PAL_cserve
),
"0"
(
__r16
),
"1"
(
__r17
),
"2"
(
__r18
)
:
"$1"
,
"$22"
,
"$23"
,
"$24"
,
"$25"
);
rtc_access
->
data
=
__r0
;
}
static
u8
__marvel_rtc_io
(
u8
b
,
unsigned
long
addr
,
int
write
)
{
static
u8
index
=
0
;
struct
marvel_rtc_access_info
rtc_access
;
u8
ret
=
0
;
switch
(
addr
)
{
case
0x70
:
/* RTC_PORT(0) */
if
(
write
)
index
=
b
;
ret
=
index
;
break
;
case
0x71
:
/* RTC_PORT(1) */
rtc_access
.
index
=
index
;
rtc_access
.
data
=
BCD_TO_BIN
(
b
);
rtc_access
.
function
=
0x48
+
!
write
;
/* GET/PUT_TOY */
#ifdef CONFIG_SMP
if
(
smp_processor_id
()
!=
boot_cpuid
)
smp_call_function_on_cpu
(
__marvel_access_rtc
,
&
rtc_access
,
1
,
1
,
cpumask_of_cpu
(
boot_cpuid
));
else
__marvel_access_rtc
(
&
rtc_access
);
#else
__marvel_access_rtc
(
&
rtc_access
);
#endif
ret
=
BIN_TO_BCD
(
rtc_access
.
data
);
break
;
default:
printk
(
KERN_WARNING
"Illegal RTC port %lx
\n
"
,
addr
);
break
;
}
return
ret
;
}
/*
* IO map support.
*/
unsigned
long
#define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000))
void
__iomem
*
marvel_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
struct
pci_controller
*
hose
;
...
...
@@ -633,8 +706,6 @@ marvel_ioremap(unsigned long addr, unsigned long size)
}
#endif
if
(
!
marvel_is_ioaddr
(
addr
))
return
0UL
;
/*
* Find the hose.
*/
...
...
@@ -643,7 +714,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
break
;
}
if
(
!
hose
)
return
0U
L
;
return
NUL
L
;
/*
* We have the hose - calculate the bus limits.
...
...
@@ -655,15 +726,17 @@ marvel_ioremap(unsigned long addr, unsigned long size)
* Is it direct-mapped?
*/
if
((
baddr
>=
__direct_map_base
)
&&
((
baddr
+
size
-
1
)
<
__direct_map_base
+
__direct_map_size
))
return
IDENT_ADDR
|
(
baddr
-
__direct_map_base
);
((
baddr
+
size
-
1
)
<
__direct_map_base
+
__direct_map_size
))
{
addr
=
IDENT_ADDR
|
(
baddr
-
__direct_map_base
);
return
(
void
__iomem
*
)
addr
;
}
/*
* Check the scatter-gather arena.
*/
if
(
hose
->
sg_pci
&&
baddr
>=
(
unsigned
long
)
hose
->
sg_pci
->
dma_base
&&
last
<
(
unsigned
long
)
hose
->
sg_pci
->
dma_base
+
hose
->
sg_pci
->
size
){
last
<
(
unsigned
long
)
hose
->
sg_pci
->
dma_base
+
hose
->
sg_pci
->
size
)
{
/*
* Adjust the limits (mappings must be page aligned)
...
...
@@ -677,7 +750,9 @@ marvel_ioremap(unsigned long addr, unsigned long size)
* Map it.
*/
area
=
get_vm_area
(
size
,
VM_IOREMAP
);
if
(
!
area
)
return
(
unsigned
long
)
NULL
;
if
(
!
area
)
return
NULL
;
ptes
=
hose
->
sg_pci
->
ptes
;
for
(
vaddr
=
(
unsigned
long
)
area
->
addr
;
baddr
<=
last
;
...
...
@@ -686,7 +761,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
if
(
!
(
pfn
&
1
))
{
printk
(
"ioremap failed... pte not valid...
\n
"
);
vfree
(
area
->
addr
);
return
0U
L
;
return
NUL
L
;
}
pfn
>>=
1
;
/* make it a true pfn */
...
...
@@ -695,7 +770,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
PAGE_SIZE
,
0
))
{
printk
(
"FAILED to map...
\n
"
);
vfree
(
area
->
addr
);
return
0U
L
;
return
NUL
L
;
}
}
...
...
@@ -703,101 +778,81 @@ marvel_ioremap(unsigned long addr, unsigned long size)
vaddr
=
(
unsigned
long
)
area
->
addr
+
(
addr
&
~
PAGE_MASK
);
return
vaddr
;
return
(
void
__iomem
*
)
vaddr
;
}
/*
* Not found - assume legacy ioremap.
*/
return
addr
;
return
NULL
;
}
void
marvel_iounmap
(
unsigned
long
addr
)
marvel_iounmap
(
volatile
void
__iomem
*
x
addr
)
{
if
(((
long
)
addr
>>
41
)
==
-
2
)
return
;
/* kseg map, nothing to do */
if
(
addr
)
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
>=
VMALLOC_START
)
vfree
((
void
*
)(
PAGE_MASK
&
addr
));
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL
(
marvel_ioremap
);
EXPORT_SYMBOL
(
marvel_iounmap
);
#endif
/*
* RTC Support
*/
struct
marvel_rtc_access_info
{
unsigned
long
function
;
unsigned
long
index
;
unsigned
long
data
;
};
static
void
__marvel_access_rtc
(
void
*
info
)
int
marvel_is_mmio
(
const
volatile
void
__iomem
*
xaddr
)
{
struct
marvel_rtc_access_info
*
rtc_access
=
info
;
register
unsigned
long
__r0
__asm__
(
"$0"
);
register
unsigned
long
__r16
__asm__
(
"$16"
)
=
rtc_access
->
function
;
register
unsigned
long
__r17
__asm__
(
"$17"
)
=
rtc_access
->
index
;
register
unsigned
long
__r18
__asm__
(
"$18"
)
=
rtc_access
->
data
;
__asm__
__volatile__
(
"call_pal %4 # cserve rtc"
:
"=r"
(
__r16
),
"=r"
(
__r17
),
"=r"
(
__r18
),
"=r"
(
__r0
)
:
"i"
(
PAL_cserve
),
"0"
(
__r16
),
"1"
(
__r17
),
"2"
(
__r18
)
:
"$1"
,
"$22"
,
"$23"
,
"$24"
,
"$25"
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
rtc_access
->
data
=
__r0
;
if
(
addr
>=
VMALLOC_START
)
return
1
;
else
return
(
addr
&
0xFF000000UL
)
==
0
;
}
u8
__marvel_rtc_io
(
int
write
,
u8
b
,
unsigned
long
addr
)
{
struct
marvel_rtc_access_info
rtc_access
=
{
0
,
};
static
u8
index
=
0
;
u8
ret
=
0
;
switch
(
addr
)
{
case
0x70
:
/* RTC_PORT(0) */
if
(
write
)
index
=
b
;
ret
=
index
;
break
;
#define __marvel_is_port_vga(a) \
(((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71))
case
0x71
:
/* RTC_PORT(1) */
rtc_access
.
index
=
index
;
rtc_access
.
data
=
BCD_TO_BIN
(
b
);
rtc_access
.
function
=
0x49
;
/* GET_TOY */
if
(
write
)
rtc_access
.
function
=
0x48
;
/* PUT_TOY */
#ifdef CONFIG_SMP
if
(
smp_processor_id
()
!=
boot_cpuid
)
smp_call_function_on_cpu
(
__marvel_access_rtc
,
&
rtc_access
,
1
,
/* retry */
1
,
/* wait */
1UL
<<
boot_cpuid
);
else
__marvel_access_rtc
(
&
rtc_access
);
#else
__marvel_access_rtc
(
&
rtc_access
);
void
__iomem
*
marvel_ioportmap
(
unsigned
long
addr
)
{
if
(
__marvel_is_port_rtc
(
addr
)
||
__marvel_is_port_kbd
(
addr
))
;
#ifdef CONFIG_VGA_HOSE
else
if
(
__marvel_is_port_vga
(
addr
)
&&
pci_vga_hose
)
addr
+=
pci_vga_hose
->
io_space
->
start
;
#endif
ret
=
BIN_TO_BCD
(
rtc_access
.
data
);
break
;
else
return
NULL
;
return
(
void
__iomem
*
)
addr
;
}
default:
printk
(
KERN_WARNING
"Illegal RTC port %lx
\n
"
,
addr
);
break
;
}
unsigned
int
marvel_ioread8
(
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
__marvel_is_port_kbd
(
addr
))
return
0
;
else
if
(
__marvel_is_port_rtc
(
addr
))
return
__marvel_rtc_io
(
0
,
addr
,
0
);
else
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
return
ret
;
void
marvel_iowrite8
(
u8
b
,
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
__marvel_is_port_kbd
(
addr
))
return
;
else
if
(
__marvel_is_port_rtc
(
addr
))
__marvel_rtc_io
(
b
,
addr
,
1
);
else
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL
(
marvel_ioremap
);
EXPORT_SYMBOL
(
marvel_iounmap
);
EXPORT_SYMBOL
(
marvel_is_mmio
);
EXPORT_SYMBOL
(
marvel_ioportmap
);
EXPORT_SYMBOL
(
marvel_ioread8
);
EXPORT_SYMBOL
(
marvel_iowrite8
);
#endif
/*
* NUMA Support
...
...
arch/alpha/kernel/core_titan.c
View file @
17647b1d
...
...
@@ -461,7 +461,8 @@ titan_kill_arch(int mode)
/*
* IO map support.
*/
unsigned
long
void
__iomem
*
titan_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
int
h
=
(
addr
&
TITAN_HOSE_MASK
)
>>
TITAN_HOSE_SHIFT
;
...
...
@@ -487,15 +488,19 @@ titan_ioremap(unsigned long addr, unsigned long size)
* Find the hose.
*/
for
(
hose
=
hose_head
;
hose
;
hose
=
hose
->
next
)
if
(
hose
->
index
==
h
)
break
;
if
(
!
hose
)
return
(
unsigned
long
)
NULL
;
if
(
hose
->
index
==
h
)
break
;
if
(
!
hose
)
return
NULL
;
/*
* Is it direct-mapped?
*/
if
((
baddr
>=
__direct_map_base
)
&&
((
baddr
+
size
-
1
)
<
__direct_map_base
+
__direct_map_size
))
return
addr
-
__direct_map_base
+
TITAN_MEM_BIAS
;
((
baddr
+
size
-
1
)
<
__direct_map_base
+
__direct_map_size
))
{
vaddr
=
addr
-
__direct_map_base
+
TITAN_MEM_BIAS
;
return
(
void
__iomem
*
)
vaddr
;
}
/*
* Check the scatter-gather arena.
...
...
@@ -516,7 +521,9 @@ titan_ioremap(unsigned long addr, unsigned long size)
* Map it
*/
area
=
get_vm_area
(
size
,
VM_IOREMAP
);
if
(
!
area
)
return
(
unsigned
long
)
NULL
;
if
(
!
area
)
return
NULL
;
ptes
=
hose
->
sg_pci
->
ptes
;
for
(
vaddr
=
(
unsigned
long
)
area
->
addr
;
baddr
<=
last
;
...
...
@@ -525,7 +532,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
if
(
!
(
pfn
&
1
))
{
printk
(
"ioremap failed... pte not valid...
\n
"
);
vfree
(
area
->
addr
);
return
(
unsigned
long
)
NULL
;
return
NULL
;
}
pfn
>>=
1
;
/* make it a true pfn */
...
...
@@ -534,35 +541,42 @@ titan_ioremap(unsigned long addr, unsigned long size)
PAGE_SIZE
,
0
))
{
printk
(
"FAILED to map...
\n
"
);
vfree
(
area
->
addr
);
return
(
unsigned
long
)
NULL
;
return
NULL
;
}
}
flush_tlb_all
();
vaddr
=
(
unsigned
long
)
area
->
addr
+
(
addr
&
~
PAGE_MASK
);
return
vaddr
;
return
(
void
__iomem
*
)
vaddr
;
}
/*
* Not found - assume legacy ioremap.
*/
return
addr
+
TITAN_MEM_BIAS
;
return
NULL
;
}
void
titan_iounmap
(
unsigned
long
addr
)
titan_iounmap
(
volatile
void
__iomem
*
x
addr
)
{
if
(((
long
)
addr
>>
41
)
==
-
2
)
return
;
/* kseg map, nothing to do */
if
(
addr
)
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
>=
VMALLOC_START
)
vfree
((
void
*
)(
PAGE_MASK
&
addr
));
}
int
titan_is_mmio
(
const
volatile
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
>=
VMALLOC_START
)
return
1
;
else
return
(
addr
&
0x100000000UL
)
==
0
;
}
#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL
(
titan_ioremap
);
EXPORT_SYMBOL
(
titan_iounmap
);
EXPORT_SYMBOL
(
titan_is_mmio
);
#endif
/*
...
...
arch/alpha/kernel/err_titan.c
View file @
17647b1d
...
...
@@ -177,7 +177,7 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
#define TITAN__PCHIP_PERROR__CMD__S (52)
#define TITAN__PCHIP_PERROR__CMD__M (0x0f)
#define TITAN__PCHIP_PERROR__ADDR__S (14)
#define TITAN__PCHIP_PERROR__ADDR__M (0x1ffffffff)
#define TITAN__PCHIP_PERROR__ADDR__M (0x1ffffffff
ul
)
if
(
!
(
perror
&
TITAN__PCHIP_PERROR__ERRMASK
))
return
MCHK_DISPOSITION_UNKNOWN_ERROR
;
...
...
arch/alpha/kernel/machvec_impl.h
View file @
17647b1d
...
...
@@ -44,56 +44,60 @@
#define DO_DEFAULT_RTC rtc_port: 0x70
#define DO_EV4_MMU \
max_asn:
EV4_MAX_ASN, \
mv_switch_mm:
ev4_switch_mm, \
mv_activate_mm:
ev4_activate_mm, \
mv_flush_tlb_current:
ev4_flush_tlb_current, \
mv_flush_tlb_current_page:
ev4_flush_tlb_current_page
.max_asn =
EV4_MAX_ASN, \
.mv_switch_mm =
ev4_switch_mm, \
.mv_activate_mm =
ev4_activate_mm, \
.mv_flush_tlb_current =
ev4_flush_tlb_current, \
.mv_flush_tlb_current_page =
ev4_flush_tlb_current_page
#define DO_EV5_MMU \
max_asn:
EV5_MAX_ASN, \
mv_switch_mm:
ev5_switch_mm, \
mv_activate_mm:
ev5_activate_mm, \
mv_flush_tlb_current:
ev5_flush_tlb_current, \
mv_flush_tlb_current_page:
ev5_flush_tlb_current_page
.max_asn =
EV5_MAX_ASN, \
.mv_switch_mm =
ev5_switch_mm, \
.mv_activate_mm =
ev5_activate_mm, \
.mv_flush_tlb_current =
ev5_flush_tlb_current, \
.mv_flush_tlb_current_page =
ev5_flush_tlb_current_page
#define DO_EV6_MMU \
max_asn:
EV6_MAX_ASN, \
mv_switch_mm:
ev5_switch_mm, \
mv_activate_mm:
ev5_activate_mm, \
mv_flush_tlb_current:
ev5_flush_tlb_current, \
mv_flush_tlb_current_page:
ev5_flush_tlb_current_page
.max_asn =
EV6_MAX_ASN, \
.mv_switch_mm =
ev5_switch_mm, \
.mv_activate_mm =
ev5_activate_mm, \
.mv_flush_tlb_current =
ev5_flush_tlb_current, \
.mv_flush_tlb_current_page =
ev5_flush_tlb_current_page
#define DO_EV7_MMU \
max_asn:
EV6_MAX_ASN, \
mv_switch_mm:
ev5_switch_mm, \
mv_activate_mm:
ev5_activate_mm, \
mv_flush_tlb_current:
ev5_flush_tlb_current, \
mv_flush_tlb_current_page:
ev5_flush_tlb_current_page
.max_asn =
EV6_MAX_ASN, \
.mv_switch_mm =
ev5_switch_mm, \
.mv_activate_mm =
ev5_activate_mm, \
.mv_flush_tlb_current =
ev5_flush_tlb_current, \
.mv_flush_tlb_current_page =
ev5_flush_tlb_current_page
#define IO_LITE(UP,low) \
hae_register: (unsigned long *) CAT(UP,_HAE_ADDRESS), \
iack_sc: CAT(UP,_IACK_SC), \
mv_inb: CAT(low,_inb), \
mv_inw: CAT(low,_inw), \
mv_inl: CAT(low,_inl), \
mv_outb: CAT(low,_outb), \
mv_outw: CAT(low,_outw), \
mv_outl: CAT(low,_outl), \
mv_readb: CAT(low,_readb), \
mv_readw: CAT(low,_readw), \
mv_readl: CAT(low,_readl), \
mv_readq: CAT(low,_readq), \
mv_writeb: CAT(low,_writeb), \
mv_writew: CAT(low,_writew), \
mv_writel: CAT(low,_writel), \
mv_writeq: CAT(low,_writeq), \
mv_ioremap: CAT(low,_ioremap), \
mv_iounmap: CAT(low,_iounmap) \
.hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
.iack_sc = CAT(UP,_IACK_SC), \
.mv_ioread8 = CAT(low,_ioread8), \
.mv_ioread16 = CAT(low,_ioread16), \
.mv_ioread32 = CAT(low,_ioread32), \
.mv_iowrite8 = CAT(low,_iowrite8), \
.mv_iowrite16 = CAT(low,_iowrite16), \
.mv_iowrite32 = CAT(low,_iowrite32), \
.mv_readb = CAT(low,_readb), \
.mv_readw = CAT(low,_readw), \
.mv_readl = CAT(low,_readl), \
.mv_readq = CAT(low,_readq), \
.mv_writeb = CAT(low,_writeb), \
.mv_writew = CAT(low,_writew), \
.mv_writel = CAT(low,_writel), \
.mv_writeq = CAT(low,_writeq), \
.mv_ioportmap = CAT(low,_ioportmap), \
.mv_ioremap = CAT(low,_ioremap), \
.mv_iounmap = CAT(low,_iounmap), \
.mv_is_ioaddr = CAT(low,_is_ioaddr), \
.mv_is_mmio = CAT(low,_is_mmio) \
#define IO(UP,low) \
IO_LITE(UP,low), \
pci_ops: &CAT(low,_pci_ops)
.pci_ops = &CAT(low,_pci_ops), \
.mv_pci_tbi = CAT(low,_pci_tbi)
#define DO_APECS_IO IO(APECS,apecs)
#define DO_CIA_IO IO(CIA,cia)
...
...
@@ -108,23 +112,8 @@
#define DO_WILDFIRE_IO IO(WILDFIRE,wildfire)
#define DO_PYXIS_IO IO_LITE(CIA,cia_bwx), \
pci_ops: &CAT(cia,_pci_ops)
#define BUS(which) \
mv_is_ioaddr: CAT(which,_is_ioaddr), \
mv_pci_tbi: CAT(which,_pci_tbi)
#define DO_APECS_BUS BUS(apecs)
#define DO_CIA_BUS BUS(cia)
#define DO_IRONGATE_BUS BUS(irongate)
#define DO_LCA_BUS BUS(lca)
#define DO_MARVEL_BUS BUS(marvel)
#define DO_MCPCIA_BUS BUS(mcpcia)
#define DO_POLARIS_BUS BUS(polaris)
#define DO_T2_BUS BUS(t2)
#define DO_TSUNAMI_BUS BUS(tsunami)
#define DO_TITAN_BUS BUS(titan)
#define DO_WILDFIRE_BUS BUS(wildfire)
.pci_ops = &cia_pci_ops, \
.mv_pci_tbi = cia_pci_tbi
/*
* In a GENERIC kernel, we have lots of these vectors floating about,
...
...
arch/alpha/kernel/osf_sys.c
View file @
17647b1d
...
...
@@ -1110,46 +1110,47 @@ osf_getrusage(int who, struct rusage32 __user *ru)
return
copy_to_user
(
ru
,
&
r
,
sizeof
(
r
))
?
-
EFAULT
:
0
;
}
asmlinkage
int
osf_wait4
(
pid_t
pid
,
int
__user
*
ustatus
,
int
options
,
struct
rusage32
__user
*
ur
)
asmlinkage
long
osf_wait4
(
pid_t
pid
,
int
__user
*
ustatus
,
int
options
,
struct
rusage32
__user
*
ur
)
{
if
(
!
ur
)
{
struct
rusage
r
;
long
ret
,
err
;
mm_segment_t
old_fs
;
if
(
!
ur
)
return
sys_wait4
(
pid
,
ustatus
,
options
,
NULL
);
}
else
{
struct
rusage
r
;
int
ret
,
status
;
mm_segment_t
old_fs
=
get_fs
();
old_fs
=
get_fs
();
set_fs
(
KERNEL_DS
);
ret
=
sys_wait4
(
pid
,
&
status
,
options
,
&
r
);
set_fs
(
old_fs
);
set_fs
(
KERNEL_DS
);
ret
=
sys_wait4
(
pid
,
ustatus
,
options
,
(
struct
rusage
__user
*
)
&
r
);
set_fs
(
old_fs
);
if
(
!
access_ok
(
VERIFY_WRITE
,
ur
,
sizeof
(
*
ur
)))
return
-
EFAULT
;
__put_user
(
r
.
ru_utime
.
tv_sec
,
&
ur
->
ru_utime
.
tv_sec
);
__put_user
(
r
.
ru_utime
.
tv_usec
,
&
ur
->
ru_utime
.
tv_usec
);
__put_user
(
r
.
ru_stime
.
tv_sec
,
&
ur
->
ru_stime
.
tv_sec
);
__put_user
(
r
.
ru_stime
.
tv_usec
,
&
ur
->
ru_stime
.
tv_usec
);
__put_user
(
r
.
ru_maxrss
,
&
ur
->
ru_maxrss
);
__put_user
(
r
.
ru_ixrss
,
&
ur
->
ru_ixrss
);
__put_user
(
r
.
ru_idrss
,
&
ur
->
ru_idrss
);
__put_user
(
r
.
ru_isrss
,
&
ur
->
ru_isrss
);
__put_user
(
r
.
ru_minflt
,
&
ur
->
ru_minflt
);
__put_user
(
r
.
ru_majflt
,
&
ur
->
ru_majflt
);
__put_user
(
r
.
ru_nswap
,
&
ur
->
ru_nswap
);
__put_user
(
r
.
ru_inblock
,
&
ur
->
ru_inblock
);
__put_user
(
r
.
ru_oublock
,
&
ur
->
ru_oublock
);
__put_user
(
r
.
ru_msgsnd
,
&
ur
->
ru_msgsnd
);
__put_user
(
r
.
ru_msgrcv
,
&
ur
->
ru_msgrcv
);
__put_user
(
r
.
ru_nsignals
,
&
ur
->
ru_nsignals
);
__put_user
(
r
.
ru_nvcsw
,
&
ur
->
ru_nvcsw
);
if
(
__put_user
(
r
.
ru_nivcsw
,
&
ur
->
ru_nivcsw
))
return
-
EFAULT
;
if
(
!
access_ok
(
VERIFY_WRITE
,
ur
,
sizeof
(
*
ur
)))
return
-
EFAULT
;
if
(
ustatus
&&
put_user
(
status
,
ustatus
))
return
-
EFAULT
;
return
ret
;
}
err
=
0
;
err
|=
__put_user
(
r
.
ru_utime
.
tv_sec
,
&
ur
->
ru_utime
.
tv_sec
);
err
|=
__put_user
(
r
.
ru_utime
.
tv_usec
,
&
ur
->
ru_utime
.
tv_usec
);
err
|=
__put_user
(
r
.
ru_stime
.
tv_sec
,
&
ur
->
ru_stime
.
tv_sec
);
err
|=
__put_user
(
r
.
ru_stime
.
tv_usec
,
&
ur
->
ru_stime
.
tv_usec
);
err
|=
__put_user
(
r
.
ru_maxrss
,
&
ur
->
ru_maxrss
);
err
|=
__put_user
(
r
.
ru_ixrss
,
&
ur
->
ru_ixrss
);
err
|=
__put_user
(
r
.
ru_idrss
,
&
ur
->
ru_idrss
);
err
|=
__put_user
(
r
.
ru_isrss
,
&
ur
->
ru_isrss
);
err
|=
__put_user
(
r
.
ru_minflt
,
&
ur
->
ru_minflt
);
err
|=
__put_user
(
r
.
ru_majflt
,
&
ur
->
ru_majflt
);
err
|=
__put_user
(
r
.
ru_nswap
,
&
ur
->
ru_nswap
);
err
|=
__put_user
(
r
.
ru_inblock
,
&
ur
->
ru_inblock
);
err
|=
__put_user
(
r
.
ru_oublock
,
&
ur
->
ru_oublock
);
err
|=
__put_user
(
r
.
ru_msgsnd
,
&
ur
->
ru_msgsnd
);
err
|=
__put_user
(
r
.
ru_msgrcv
,
&
ur
->
ru_msgrcv
);
err
|=
__put_user
(
r
.
ru_nsignals
,
&
ur
->
ru_nsignals
);
err
|=
__put_user
(
r
.
ru_nvcsw
,
&
ur
->
ru_nvcsw
);
err
|=
__put_user
(
r
.
ru_nivcsw
,
&
ur
->
ru_nivcsw
);
return
err
?
err
:
ret
;
}
/*
...
...
arch/alpha/kernel/pci-noop.c
View file @
17647b1d
...
...
@@ -200,3 +200,15 @@ dma_set_mask(struct device *dev, u64 mask)
return
0
;
}
void
__iomem
*
pci_iomap
(
struct
pci_dev
*
dev
,
int
bar
,
unsigned
long
maxlen
)
{
return
NULL
;
}
void
pci_iounmap
(
struct
pci_dev
*
dev
,
void
__iomem
*
addr
)
{
}
EXPORT_SYMBOL
(
pci_iomap
);
EXPORT_SYMBOL
(
pci_iounmap
);
arch/alpha/kernel/pci.c
View file @
17647b1d
...
...
@@ -531,3 +531,37 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
return
-
EOPNOTSUPP
;
}
/* Create an __iomem token from a PCI BAR. Copied from lib/iomap.c with
no changes, since we don't want the other things in that object file. */
void
__iomem
*
pci_iomap
(
struct
pci_dev
*
dev
,
int
bar
,
unsigned
long
maxlen
)
{
unsigned
long
start
=
pci_resource_start
(
dev
,
bar
);
unsigned
long
len
=
pci_resource_len
(
dev
,
bar
);
unsigned
long
flags
=
pci_resource_flags
(
dev
,
bar
);
if
(
!
len
||
!
start
)
return
NULL
;
if
(
maxlen
&&
len
>
maxlen
)
len
=
maxlen
;
if
(
flags
&
IORESOURCE_IO
)
return
ioport_map
(
start
,
len
);
if
(
flags
&
IORESOURCE_MEM
)
{
/* Not checking IORESOURCE_CACHEABLE because alpha does
not distinguish between ioremap and ioremap_nocache. */
return
ioremap
(
start
,
len
);
}
return
NULL
;
}
/* Destroy that token. Not copied from lib/iomap.c. */
void
pci_iounmap
(
struct
pci_dev
*
dev
,
void
__iomem
*
addr
)
{
if
(
__is_mmio
(
addr
))
iounmap
(
addr
);
}
EXPORT_SYMBOL
(
pci_iomap
);
EXPORT_SYMBOL
(
pci_iounmap
);
arch/alpha/kernel/srmcons.c
View file @
17647b1d
...
...
@@ -146,7 +146,7 @@ srmcons_write(struct tty_struct *tty, int from_user,
if
(
c
>
sizeof
(
tmp
))
c
=
sizeof
(
tmp
);
c
-=
copy_from_user
(
tmp
,
buf
,
c
);
c
-=
copy_from_user
(
tmp
,
(
const
char
__user
*
)
buf
,
c
);
if
(
!
c
)
{
printk
(
"%s: EFAULT (count %d)
\n
"
,
...
...
arch/alpha/kernel/sys_alcor.c
View file @
17647b1d
...
...
@@ -274,7 +274,6 @@ struct alpha_machine_vector alcor_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
@@ -292,7 +291,7 @@ struct alpha_machine_vector alcor_mv __initmv = {
.
pci_swizzle
=
common_swizzle
,
.
sys
=
{
.
cia
=
{
.
gru_int_req_bits
=
ALCOR_GRU_INT_REQ_BITS
.
gru_int_req_bits
=
ALCOR_GRU_INT_REQ_BITS
}}
};
ALIAS_MV
(
alcor
)
...
...
@@ -302,7 +301,6 @@ struct alpha_machine_vector xlt_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
@@ -320,7 +318,7 @@ struct alpha_machine_vector xlt_mv __initmv = {
.
pci_swizzle
=
common_swizzle
,
.
sys
=
{
.
cia
=
{
.
gru_int_req_bits
=
XLT_GRU_INT_REQ_BITS
.
gru_int_req_bits
=
XLT_GRU_INT_REQ_BITS
}}
};
...
...
arch/alpha/kernel/sys_cabriolet.c
View file @
17647b1d
...
...
@@ -327,7 +327,6 @@ struct alpha_machine_vector cabriolet_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
DO_APECS_BUS
,
.
machine_check
=
apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -354,7 +353,6 @@ struct alpha_machine_vector eb164_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -380,7 +378,6 @@ struct alpha_machine_vector eb66p_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_LCA_IO
,
DO_LCA_BUS
,
.
machine_check
=
lca_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -405,7 +402,6 @@ struct alpha_machine_vector lx164_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_PYXIS_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -432,7 +428,6 @@ struct alpha_machine_vector pc164_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_dp264.c
View file @
17647b1d
...
...
@@ -569,7 +569,6 @@ struct alpha_machine_vector dp264_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -594,7 +593,6 @@ struct alpha_machine_vector monet_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -618,7 +616,6 @@ struct alpha_machine_vector webbrick_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -642,7 +639,6 @@ struct alpha_machine_vector clipper_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -671,7 +667,6 @@ struct alpha_machine_vector shark_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_eb64p.c
View file @
17647b1d
...
...
@@ -212,7 +212,6 @@ struct alpha_machine_vector eb64p_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
DO_APECS_BUS
,
.
machine_check
=
apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -238,7 +237,6 @@ struct alpha_machine_vector eb66_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_LCA_IO
,
DO_LCA_BUS
,
.
machine_check
=
lca_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_eiger.c
View file @
17647b1d
...
...
@@ -222,7 +222,6 @@ struct alpha_machine_vector eiger_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TSUNAMI_IO
,
DO_TSUNAMI_BUS
,
.
machine_check
=
tsunami_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_jensen.c
View file @
17647b1d
...
...
@@ -249,8 +249,6 @@ jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
printk
(
KERN_CRIT
"Machine check
\n
"
);
}
#define jensen_pci_tbi ((void*)0)
/*
* The System Vector
...
...
@@ -260,7 +258,6 @@ struct alpha_machine_vector jensen_mv __initmv = {
.
vector_name
=
"Jensen"
,
DO_EV4_MMU
,
IO_LITE
(
JENSEN
,
jensen
),
BUS
(
jensen
),
.
machine_check
=
jensen_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
rtc_port
=
0x170
,
...
...
arch/alpha/kernel/sys_marvel.c
View file @
17647b1d
...
...
@@ -471,7 +471,6 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
DO_EV7_MMU
,
DO_DEFAULT_RTC
,
DO_MARVEL_IO
,
DO_MARVEL_BUS
,
.
machine_check
=
marvel_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_miata.c
View file @
17647b1d
...
...
@@ -269,7 +269,6 @@ struct alpha_machine_vector miata_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_PYXIS_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_mikasa.c
View file @
17647b1d
...
...
@@ -221,7 +221,6 @@ struct alpha_machine_vector mikasa_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
DO_APECS_BUS
,
.
machine_check
=
mikasa_apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -246,7 +245,6 @@ struct alpha_machine_vector mikasa_primo_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_nautilus.c
View file @
17647b1d
...
...
@@ -250,7 +250,6 @@ struct alpha_machine_vector nautilus_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_IRONGATE_IO
,
DO_IRONGATE_BUS
,
.
machine_check
=
nautilus_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_noritake.c
View file @
17647b1d
...
...
@@ -303,7 +303,6 @@ struct alpha_machine_vector noritake_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
DO_APECS_BUS
,
.
machine_check
=
noritake_apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
@@ -328,7 +327,6 @@ struct alpha_machine_vector noritake_primo_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_rawhide.c
View file @
17647b1d
...
...
@@ -250,7 +250,6 @@ struct alpha_machine_vector rawhide_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_MCPCIA_IO
,
DO_MCPCIA_BUS
,
.
machine_check
=
mcpcia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_ruffian.c
View file @
17647b1d
...
...
@@ -220,7 +220,6 @@ struct alpha_machine_vector ruffian_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_PYXIS_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_rx164.c
View file @
17647b1d
...
...
@@ -201,7 +201,6 @@ struct alpha_machine_vector rx164_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_POLARIS_IO
,
DO_POLARIS_BUS
,
.
machine_check
=
polaris_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_sable.c
View file @
17647b1d
...
...
@@ -566,7 +566,6 @@ struct alpha_machine_vector sable_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_T2_IO
,
DO_T2_BUS
,
.
machine_check
=
t2_machine_check
,
.
max_isa_dma_address
=
ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
@@ -599,7 +598,6 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_T2_IO
,
DO_T2_BUS
,
.
machine_check
=
t2_machine_check
,
.
max_isa_dma_address
=
ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
@@ -631,7 +629,6 @@ struct alpha_machine_vector lynx_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_T2_IO
,
DO_T2_BUS
,
.
machine_check
=
t2_machine_check
,
.
max_isa_dma_address
=
ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
EISA_DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_sio.c
View file @
17647b1d
...
...
@@ -288,7 +288,6 @@ struct alpha_machine_vector alphabook1_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_LCA_IO
,
DO_LCA_BUS
,
.
machine_check
=
lca_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -319,7 +318,6 @@ struct alpha_machine_vector avanti_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
DO_APECS_BUS
,
.
machine_check
=
apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -349,7 +347,6 @@ struct alpha_machine_vector noname_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_LCA_IO
,
DO_LCA_BUS
,
.
machine_check
=
lca_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -388,7 +385,6 @@ struct alpha_machine_vector p2k_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_LCA_IO
,
DO_LCA_BUS
,
.
machine_check
=
lca_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -418,7 +414,6 @@ struct alpha_machine_vector xl_mv __initmv = {
DO_EV4_MMU
,
DO_DEFAULT_RTC
,
DO_APECS_IO
,
BUS
(
apecs
),
.
machine_check
=
apecs_machine_check
,
.
max_isa_dma_address
=
ALPHA_XL_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_sx164.c
View file @
17647b1d
...
...
@@ -158,7 +158,6 @@ struct alpha_machine_vector sx164_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_PYXIS_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_takara.c
View file @
17647b1d
...
...
@@ -277,7 +277,6 @@ struct alpha_machine_vector takara_mv __initmv = {
DO_EV5_MMU
,
DO_DEFAULT_RTC
,
DO_CIA_IO
,
DO_CIA_BUS
,
.
machine_check
=
cia_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_titan.c
View file @
17647b1d
...
...
@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
register
int
bcpu
=
boot_cpuid
;
#ifdef CONFIG_SMP
register
unsigned
long
cpm
=
cpu_present_mask
;
cpumask_t
cpm
=
cpu_present_mask
;
volatile
unsigned
long
*
dim0
,
*
dim1
,
*
dim2
,
*
dim3
;
unsigned
long
mask0
,
mask1
,
mask2
,
mask3
,
dummy
;
...
...
@@ -85,10 +85,10 @@ titan_update_irq_hw(unsigned long mask)
dim1
=
&
cchip
->
dim1
.
csr
;
dim2
=
&
cchip
->
dim2
.
csr
;
dim3
=
&
cchip
->
dim3
.
csr
;
if
(
(
cpm
&
1
)
==
0
)
dim0
=
&
dummy
;
if
(
(
cpm
&
2
)
==
0
)
dim1
=
&
dummy
;
if
(
(
cpm
&
4
)
==
0
)
dim2
=
&
dummy
;
if
(
(
cpm
&
8
)
==
0
)
dim3
=
&
dummy
;
if
(
!
cpu_isset
(
0
,
cpm
)
)
dim0
=
&
dummy
;
if
(
!
cpu_isset
(
1
,
cpm
)
)
dim1
=
&
dummy
;
if
(
!
cpu_isset
(
2
,
cpm
)
)
dim2
=
&
dummy
;
if
(
!
cpu_isset
(
3
,
cpm
)
)
dim3
=
&
dummy
;
*
dim0
=
mask0
;
*
dim1
=
mask1
;
...
...
@@ -369,7 +369,6 @@ struct alpha_machine_vector titan_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TITAN_IO
,
DO_TITAN_BUS
,
.
machine_check
=
titan_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
@@ -397,7 +396,6 @@ struct alpha_machine_vector privateer_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_TITAN_IO
,
DO_TITAN_BUS
,
.
machine_check
=
privateer_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/sys_wildfire.c
View file @
17647b1d
...
...
@@ -337,7 +337,6 @@ struct alpha_machine_vector wildfire_mv __initmv = {
DO_EV6_MMU
,
DO_DEFAULT_RTC
,
DO_WILDFIRE_IO
,
DO_WILDFIRE_BUS
,
.
machine_check
=
wildfire_machine_check
,
.
max_isa_dma_address
=
ALPHA_MAX_ISA_DMA_ADDRESS
,
.
min_io_address
=
DEFAULT_IO_BASE
,
...
...
arch/alpha/kernel/traps.c
View file @
17647b1d
...
...
@@ -111,7 +111,7 @@ dik_show_code(unsigned int *pc)
printk
(
"Code:"
);
for
(
i
=
-
6
;
i
<
2
;
i
++
)
{
unsigned
int
insn
;
if
(
__get_user
(
insn
,
pc
+
i
))
if
(
__get_user
(
insn
,
(
unsigned
int
__user
*
)
pc
+
i
))
break
;
printk
(
"%c%08x%c"
,
i
?
' '
:
'<'
,
insn
,
i
?
' '
:
'>'
);
}
...
...
arch/alpha/lib/io.c
View file @
17647b1d
...
...
@@ -6,164 +6,246 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/io.h>
u8
_inb
(
unsigned
long
addr
)
/* Out-of-line versions of the i/o routines that redirect into the
platform-specific version. Note that "platform-specific" may mean
"generic", which bumps through the machine vector. */
unsigned
int
ioread8
(
void
__iomem
*
addr
)
{
return
__inb
(
addr
);
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread8
)(
addr
);
mb
();
return
ret
;
}
u
16
_inw
(
unsigned
long
addr
)
u
nsigned
int
ioread16
(
void
__iomem
*
addr
)
{
return
__inw
(
addr
);
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread16
)(
addr
);
mb
();
return
ret
;
}
u
32
_inl
(
unsigned
long
addr
)
u
nsigned
int
ioread32
(
void
__iomem
*
addr
)
{
return
__inl
(
addr
);
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread32
)(
addr
);
mb
();
return
ret
;
}
void
iowrite8
(
u8
b
,
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
iowrite8
)(
b
,
addr
);
mb
();
}
void
_outb
(
u8
b
,
unsigned
long
addr
)
void
iowrite16
(
u16
b
,
void
__iomem
*
addr
)
{
__outb
(
b
,
addr
);
IO_CONCAT
(
__IO_PREFIX
,
iowrite16
)(
b
,
addr
);
mb
();
}
void
_outw
(
u16
b
,
unsigned
long
addr
)
void
iowrite32
(
u32
b
,
void
__iomem
*
addr
)
{
__outw
(
b
,
addr
);
IO_CONCAT
(
__IO_PREFIX
,
iowrite32
)(
b
,
addr
);
mb
();
}
void
_outl
(
u32
b
,
unsigned
long
addr
)
EXPORT_SYMBOL
(
ioread8
);
EXPORT_SYMBOL
(
ioread16
);
EXPORT_SYMBOL
(
ioread32
);
EXPORT_SYMBOL
(
iowrite8
);
EXPORT_SYMBOL
(
iowrite16
);
EXPORT_SYMBOL
(
iowrite32
);
u8
inb
(
unsigned
long
port
)
{
__outl
(
b
,
addr
);
return
ioread8
(
ioport_map
(
port
,
1
)
);
}
u
8
___raw_readb
(
unsigned
long
addr
)
u
16
inw
(
unsigned
long
port
)
{
return
__readb
(
addr
);
return
ioread16
(
ioport_map
(
port
,
2
)
);
}
u
16
___raw_readw
(
unsigned
long
addr
)
u
32
inl
(
unsigned
long
port
)
{
return
__readw
(
addr
);
return
ioread32
(
ioport_map
(
port
,
4
)
);
}
u32
___raw_readl
(
unsigned
long
addr
)
void
outb
(
u8
b
,
unsigned
long
port
)
{
return
__readl
(
addr
);
iowrite8
(
b
,
ioport_map
(
port
,
1
)
);
}
u64
___raw_readq
(
unsigned
long
addr
)
void
outw
(
u16
b
,
unsigned
long
port
)
{
return
__readq
(
addr
);
iowrite16
(
b
,
ioport_map
(
port
,
2
)
);
}
u8
_readb
(
unsigned
long
addr
)
void
outl
(
u32
b
,
unsigned
long
port
)
{
unsigned
long
r
=
__readb
(
addr
);
mb
();
return
r
;
iowrite32
(
b
,
ioport_map
(
port
,
4
));
}
u16
_readw
(
unsigned
long
addr
)
EXPORT_SYMBOL
(
inb
);
EXPORT_SYMBOL
(
inw
);
EXPORT_SYMBOL
(
inl
);
EXPORT_SYMBOL
(
outb
);
EXPORT_SYMBOL
(
outw
);
EXPORT_SYMBOL
(
outl
);
u8
__raw_readb
(
const
volatile
void
__iomem
*
addr
)
{
unsigned
long
r
=
__readw
(
addr
);
mb
();
return
r
;
return
IO_CONCAT
(
__IO_PREFIX
,
readb
)(
addr
);
}
u
32
_readl
(
unsigned
long
addr
)
u
16
__raw_readw
(
const
volatile
void
__iomem
*
addr
)
{
unsigned
long
r
=
__readl
(
addr
);
mb
();
return
r
;
return
IO_CONCAT
(
__IO_PREFIX
,
readw
)(
addr
);
}
u
64
_readq
(
unsigned
long
addr
)
u
32
__raw_readl
(
const
volatile
void
__iomem
*
addr
)
{
unsigned
long
r
=
__readq
(
addr
);
mb
();
return
r
;
return
IO_CONCAT
(
__IO_PREFIX
,
readl
)(
addr
);
}
u64
__raw_readq
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
readq
)(
addr
);
}
void
__raw_writeb
(
u8
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writeb
)(
b
,
addr
);
}
void
__
_raw_writeb
(
u8
b
,
unsigned
long
addr
)
void
__
raw_writew
(
u16
b
,
volatile
void
__iomem
*
addr
)
{
__writeb
(
b
,
addr
);
IO_CONCAT
(
__IO_PREFIX
,
writew
)
(
b
,
addr
);
}
void
__
_raw_writew
(
u16
b
,
unsigned
long
addr
)
void
__
raw_writel
(
u32
b
,
volatile
void
__iomem
*
addr
)
{
__writew
(
b
,
addr
);
IO_CONCAT
(
__IO_PREFIX
,
writel
)(
b
,
addr
);
}
void
__raw_writeq
(
u64
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writeq
)(
b
,
addr
);
}
EXPORT_SYMBOL
(
__raw_readb
);
EXPORT_SYMBOL
(
__raw_readw
);
EXPORT_SYMBOL
(
__raw_readl
);
EXPORT_SYMBOL
(
__raw_readq
);
EXPORT_SYMBOL
(
__raw_writeb
);
EXPORT_SYMBOL
(
__raw_writew
);
EXPORT_SYMBOL
(
__raw_writel
);
EXPORT_SYMBOL
(
__raw_writeq
);
u8
readb
(
const
volatile
void
__iomem
*
addr
)
{
u8
ret
=
__raw_readb
(
addr
);
mb
();
return
ret
;
}
u16
readw
(
const
volatile
void
__iomem
*
addr
)
{
u16
ret
=
__raw_readw
(
addr
);
mb
();
return
ret
;
}
void
___raw_writel
(
u32
b
,
unsigned
long
addr
)
u32
readl
(
const
volatile
void
__iomem
*
addr
)
{
__writel
(
b
,
addr
);
u32
ret
=
__raw_readl
(
addr
);
mb
();
return
ret
;
}
void
___raw_writeq
(
u64
b
,
unsigned
long
addr
)
u64
readq
(
const
volatile
void
__iomem
*
addr
)
{
__writeq
(
b
,
addr
);
u64
ret
=
__raw_readq
(
addr
);
mb
();
return
ret
;
}
void
_writeb
(
u8
b
,
unsigned
long
addr
)
void
writeb
(
u8
b
,
volatile
void
__iomem
*
addr
)
{
__writeb
(
b
,
addr
);
__
raw_
writeb
(
b
,
addr
);
mb
();
}
void
_writew
(
u16
b
,
unsigned
long
addr
)
void
writew
(
u16
b
,
volatile
void
__iomem
*
addr
)
{
__writew
(
b
,
addr
);
__
raw_
writew
(
b
,
addr
);
mb
();
}
void
_writel
(
u32
b
,
unsigned
long
addr
)
void
writel
(
u32
b
,
volatile
void
__iomem
*
addr
)
{
__writel
(
b
,
addr
);
__
raw_
writel
(
b
,
addr
);
mb
();
}
void
_writeq
(
u64
b
,
unsigned
long
addr
)
void
writeq
(
u64
b
,
volatile
void
__iomem
*
addr
)
{
__writeq
(
b
,
addr
);
__
raw_
writeq
(
b
,
addr
);
mb
();
}
EXPORT_SYMBOL
(
readb
);
EXPORT_SYMBOL
(
readw
);
EXPORT_SYMBOL
(
readl
);
EXPORT_SYMBOL
(
readq
);
EXPORT_SYMBOL
(
writeb
);
EXPORT_SYMBOL
(
writew
);
EXPORT_SYMBOL
(
writel
);
EXPORT_SYMBOL
(
writeq
);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
*/
void
i
nsb
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
void
i
oread8_rep
(
void
__iomem
*
port
,
void
*
dst
,
unsigned
long
count
)
{
while
((
(
unsigned
long
)
dst
)
&
0x3
)
{
while
((
unsigned
long
)
dst
&
0x3
)
{
if
(
!
count
)
return
;
count
--
;
*
(
unsigned
char
*
)
dst
=
inb
(
port
);
*
(
unsigned
char
*
)
dst
=
ioread8
(
port
);
dst
+=
1
;
}
while
(
count
>=
4
)
{
unsigned
int
w
;
count
-=
4
;
w
=
i
nb
(
port
);
w
|=
i
nb
(
port
)
<<
8
;
w
|=
i
nb
(
port
)
<<
16
;
w
|=
i
nb
(
port
)
<<
24
;
*
(
unsigned
int
*
)
dst
=
w
;
w
=
i
oread8
(
port
);
w
|=
i
oread8
(
port
)
<<
8
;
w
|=
i
oread8
(
port
)
<<
16
;
w
|=
i
oread8
(
port
)
<<
24
;
*
(
unsigned
int
*
)
dst
=
w
;
dst
+=
4
;
}
while
(
count
)
{
--
count
;
*
(
unsigned
char
*
)
dst
=
inb
(
port
);
*
(
unsigned
char
*
)
dst
=
ioread8
(
port
);
dst
+=
1
;
}
}
void
insb
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
{
ioread8_rep
(
ioport_map
(
port
,
1
),
dst
,
count
);
}
EXPORT_SYMBOL
(
ioread8_rep
);
EXPORT_SYMBOL
(
insb
);
/*
* Read COUNT 16-bit words from port PORT into memory starting at
...
...
@@ -172,33 +254,39 @@ void insb (unsigned long port, void *dst, unsigned long count)
* the interfaces seems to be slow: just using the inlined version
* of the inw() breaks things.
*/
void
i
nsw
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
void
i
oread16_rep
(
void
__iomem
*
port
,
void
*
dst
,
unsigned
long
count
)
{
if
(((
unsigned
long
)
dst
)
&
0x3
)
{
if
(((
unsigned
long
)
dst
)
&
0x1
)
{
panic
(
"insw: memory not short aligned"
);
}
if
(
unlikely
((
unsigned
long
)
dst
&
0x3
))
{
if
(
!
count
)
return
;
BUG_ON
((
unsigned
long
)
dst
&
0x1
);
count
--
;
*
(
unsigned
short
*
)
dst
=
inw
(
port
);
*
(
unsigned
short
*
)
dst
=
ioread16
(
port
);
dst
+=
2
;
}
while
(
count
>=
2
)
{
unsigned
int
w
;
count
-=
2
;
w
=
i
nw
(
port
);
w
|=
i
nw
(
port
)
<<
16
;
*
(
unsigned
int
*
)
dst
=
w
;
w
=
i
oread16
(
port
);
w
|=
i
oread16
(
port
)
<<
16
;
*
(
unsigned
int
*
)
dst
=
w
;
dst
+=
4
;
}
if
(
count
)
{
*
(
unsigned
short
*
)
dst
=
inw
(
port
);
*
(
unsigned
short
*
)
dst
=
ioread16
(
port
);
}
}
void
insw
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
{
ioread16_rep
(
ioport_map
(
port
,
2
),
dst
,
count
);
}
EXPORT_SYMBOL
(
ioread16_rep
);
EXPORT_SYMBOL
(
insw
);
/*
* Read COUNT 32-bit words from port PORT into memory starting at
...
...
@@ -206,80 +294,31 @@ void insw (unsigned long port, void *dst, unsigned long count)
* but the interfaces seems to be slow: just using the inlined version
* of the inl() breaks things.
*/
void
i
nsl
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
void
i
oread32_rep
(
void
__iomem
*
port
,
void
*
dst
,
unsigned
long
count
)
{
unsigned
int
l
=
0
,
l2
;
if
(
!
count
)
return
;
switch
(((
unsigned
long
)
dst
)
&
0x3
)
{
case
0x00
:
/* Buffer 32-bit aligned */
while
(
count
--
)
{
*
(
unsigned
int
*
)
dst
=
inl
(
port
);
dst
+=
4
;
}
break
;
/* Assuming little endian Alphas in cases 0x01 -- 0x03 ... */
case
0x02
:
/* Buffer 16-bit aligned */
--
count
;
l
=
inl
(
port
);
*
(
unsigned
short
*
)
dst
=
l
;
dst
+=
2
;
while
(
count
--
)
{
l2
=
inl
(
port
);
*
(
unsigned
int
*
)
dst
=
l
>>
16
|
l2
<<
16
;
if
(
unlikely
((
unsigned
long
)
dst
&
0x3
))
{
while
(
count
--
)
{
struct
S
{
int
x
__attribute__
((
packed
));
};
((
struct
S
*
)
dst
)
->
x
=
ioread32
(
port
);
dst
+=
4
;
l
=
l2
;
}
*
(
unsigned
short
*
)
dst
=
l
>>
16
;
break
;
case
0x01
:
/* Buffer 8-bit aligned */
--
count
;
l
=
inl
(
port
);
*
(
unsigned
char
*
)
dst
=
l
;
dst
+=
1
;
*
(
unsigned
short
*
)
dst
=
l
>>
8
;
dst
+=
2
;
while
(
count
--
)
{
l2
=
inl
(
port
);
*
(
unsigned
int
*
)
dst
=
l
>>
24
|
l2
<<
8
;
dst
+=
4
;
l
=
l2
;
}
*
(
unsigned
char
*
)
dst
=
l
>>
24
;
break
;
case
0x03
:
/* Buffer 8-bit aligned */
--
count
;
l
=
inl
(
port
);
*
(
unsigned
char
*
)
dst
=
l
;
dst
+=
1
;
while
(
count
--
)
{
l2
=
inl
(
port
);
*
(
unsigned
int
*
)
dst
=
l
<<
24
|
l2
>>
8
;
}
else
{
/* Buffer 32-bit aligned. */
while
(
count
--
)
{
*
(
unsigned
int
*
)
dst
=
ioread32
(
port
);
dst
+=
4
;
l
=
l2
;
}
*
(
unsigned
short
*
)
dst
=
l
>>
8
;
dst
+=
2
;
*
(
unsigned
char
*
)
dst
=
l
>>
24
;
break
;
}
}
void
insl
(
unsigned
long
port
,
void
*
dst
,
unsigned
long
count
)
{
ioread32_rep
(
ioport_map
(
port
,
4
),
dst
,
count
);
}
EXPORT_SYMBOL
(
ioread32_rep
);
EXPORT_SYMBOL
(
insl
);
/*
* Like insb but in the opposite direction.
...
...
@@ -287,28 +326,35 @@ void insl (unsigned long port, void *dst, unsigned long count)
* doing byte reads the "slow" way isn't nearly as slow as
* doing byte writes the slow way (no r-m-w cycle).
*/
void
outsb
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
void
iowrite8_rep
(
void
__iomem
*
port
,
const
void
*
x
src
,
unsigned
long
count
)
{
while
(
count
)
{
count
--
;
outb
(
*
(
char
*
)
src
,
port
);
src
+=
1
;
}
const
unsigned
char
*
src
=
xsrc
;
while
(
count
--
)
iowrite8
(
*
src
++
,
port
);
}
void
outsb
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
{
iowrite8_rep
(
ioport_map
(
port
,
1
),
src
,
count
);
}
EXPORT_SYMBOL
(
iowrite8_rep
);
EXPORT_SYMBOL
(
outsb
);
/*
* Like insw but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Performance is important, but the
* interfaces seems to be slow: just using the inlined version of the
* outw() breaks things.
*/
void
outsw
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
void
iowrite16_rep
(
void
__iomem
*
port
,
const
void
*
src
,
unsigned
long
count
)
{
if
(
((
unsigned
long
)
src
)
&
0x3
)
{
if
(
((
unsigned
long
)
src
)
&
0x1
)
{
panic
(
"outsw: memory not short aligned"
)
;
}
outw
(
*
(
unsigned
short
*
)
src
,
port
);
if
(
unlikely
((
unsigned
long
)
src
&
0x3
)
)
{
if
(
!
count
)
return
;
BUG_ON
((
unsigned
long
)
src
&
0x1
);
iowrite16
(
*
(
unsigned
short
*
)
src
,
port
);
src
+=
2
;
--
count
;
}
...
...
@@ -316,107 +362,68 @@ void outsw (unsigned long port, const void *src, unsigned long count)
while
(
count
>=
2
)
{
unsigned
int
w
;
count
-=
2
;
w
=
*
(
unsigned
int
*
)
src
;
w
=
*
(
unsigned
int
*
)
src
;
src
+=
4
;
outw
(
w
>>
0
,
port
);
outw
(
w
>>
16
,
port
);
iowrite16
(
w
>>
0
,
port
);
iowrite16
(
w
>>
16
,
port
);
}
if
(
count
)
{
outw
(
*
(
unsigned
short
*
)
src
,
port
);
iowrite16
(
*
(
unsigned
short
*
)
src
,
port
);
}
}
void
outsw
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
{
iowrite16_rep
(
ioport_map
(
port
,
2
),
src
,
count
);
}
EXPORT_SYMBOL
(
iowrite16_rep
);
EXPORT_SYMBOL
(
outsw
);
/*
* Like insl but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Works with any alignment in SRC.
*
Performance is important, but the interfaces seems to be slow:
* Performance is important, but the interfaces seems to be slow:
* just using the inlined version of the outl() breaks things.
*/
void
outsl
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
void
iowrite32_rep
(
void
__iomem
*
port
,
const
void
*
src
,
unsigned
long
count
)
{
unsigned
int
l
=
0
,
l2
;
if
(
!
count
)
return
;
switch
(((
unsigned
long
)
src
)
&
0x3
)
{
case
0x00
:
/* Buffer 32-bit aligned */
while
(
count
--
)
{
outl
(
*
(
unsigned
int
*
)
src
,
port
);
src
+=
4
;
}
break
;
case
0x02
:
/* Buffer 16-bit aligned */
--
count
;
l
=
*
(
unsigned
short
*
)
src
<<
16
;
src
+=
2
;
while
(
count
--
)
{
l2
=
*
(
unsigned
int
*
)
src
;
src
+=
4
;
outl
(
l
>>
16
|
l2
<<
16
,
port
);
l
=
l2
;
}
l2
=
*
(
unsigned
short
*
)
src
;
outl
(
l
>>
16
|
l2
<<
16
,
port
);
break
;
case
0x01
:
/* Buffer 8-bit aligned */
--
count
;
l
=
*
(
unsigned
char
*
)
src
<<
8
;
src
+=
1
;
l
|=
*
(
unsigned
short
*
)
src
<<
16
;
src
+=
2
;
while
(
count
--
)
{
l2
=
*
(
unsigned
int
*
)
src
;
if
(
unlikely
((
unsigned
long
)
src
&
0x3
))
{
while
(
count
--
)
{
struct
S
{
int
x
__attribute__
((
packed
));
};
iowrite32
(((
struct
S
*
)
src
)
->
x
,
port
);
src
+=
4
;
outl
(
l
>>
8
|
l2
<<
24
,
port
);
l
=
l2
;
}
l2
=
*
(
unsigned
char
*
)
src
;
outl
(
l
>>
8
|
l2
<<
24
,
port
);
break
;
case
0x03
:
/* Buffer 8-bit aligned */
--
count
;
l
=
*
(
unsigned
char
*
)
src
<<
24
;
src
+=
1
;
while
(
count
--
)
{
l2
=
*
(
unsigned
int
*
)
src
;
}
else
{
/* Buffer 32-bit aligned. */
while
(
count
--
)
{
iowrite32
(
*
(
unsigned
int
*
)
src
,
port
);
src
+=
4
;
outl
(
l
>>
24
|
l2
<<
8
,
port
);
l
=
l2
;
}
l2
=
*
(
unsigned
short
*
)
src
;
src
+=
2
;
l2
|=
*
(
unsigned
char
*
)
src
<<
16
;
outl
(
l
>>
24
|
l2
<<
8
,
port
);
break
;
}
}
void
outsl
(
unsigned
long
port
,
const
void
*
src
,
unsigned
long
count
)
{
iowrite32_rep
(
ioport_map
(
port
,
4
),
src
,
count
);
}
EXPORT_SYMBOL
(
iowrite32_rep
);
EXPORT_SYMBOL
(
outsl
);
/*
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
void
_memcpy_fromio
(
void
*
to
,
unsigned
long
from
,
long
count
)
void
memcpy_fromio
(
void
*
to
,
const
volatile
void
__iomem
*
from
,
long
count
)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
if
(
count
>=
8
&&
((
u
nsigned
long
)
to
&
7
)
==
(
from
&
7
))
{
if
(
count
>=
8
&&
((
u
64
)
to
&
7
)
==
((
u64
)
from
&
7
))
{
count
-=
8
;
do
{
*
(
u64
*
)
to
=
__raw_readq
(
from
);
...
...
@@ -427,7 +434,7 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
count
+=
8
;
}
if
(
count
>=
4
&&
((
u
nsigned
long
)
to
&
3
)
==
(
from
&
3
))
{
if
(
count
>=
4
&&
((
u
64
)
to
&
3
)
==
((
u64
)
from
&
3
))
{
count
-=
4
;
do
{
*
(
u32
*
)
to
=
__raw_readl
(
from
);
...
...
@@ -438,7 +445,7 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
count
+=
4
;
}
if
(
count
>=
2
&&
((
u
nsigned
long
)
to
&
1
)
==
(
from
&
1
))
{
if
(
count
>=
2
&&
((
u
64
)
to
&
1
)
==
((
u64
)
from
&
1
))
{
count
-=
2
;
do
{
*
(
u16
*
)
to
=
__raw_readw
(
from
);
...
...
@@ -455,19 +462,23 @@ void _memcpy_fromio(void * to, unsigned long from, long count)
to
++
;
from
++
;
}
mb
();
}
EXPORT_SYMBOL
(
memcpy_fromio
);
/*
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
void
_memcpy_toio
(
unsigned
long
to
,
const
void
*
from
,
long
count
)
void
memcpy_toio
(
volatile
void
__iomem
*
to
,
const
void
*
from
,
long
count
)
{
/* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
/* FIXME -- align FROM. */
if
(
count
>=
8
&&
(
to
&
7
)
==
((
unsigned
long
)
from
&
7
))
{
if
(
count
>=
8
&&
(
(
u64
)
to
&
7
)
==
((
u64
)
from
&
7
))
{
count
-=
8
;
do
{
__raw_writeq
(
*
(
const
u64
*
)
from
,
to
);
...
...
@@ -478,7 +489,7 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
count
+=
8
;
}
if
(
count
>=
4
&&
(
to
&
3
)
==
((
unsigned
long
)
from
&
3
))
{
if
(
count
>=
4
&&
(
(
u64
)
to
&
3
)
==
((
u64
)
from
&
3
))
{
count
-=
4
;
do
{
__raw_writel
(
*
(
const
u32
*
)
from
,
to
);
...
...
@@ -489,7 +500,7 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
count
+=
4
;
}
if
(
count
>=
2
&&
(
to
&
1
)
==
((
unsigned
long
)
from
&
1
))
{
if
(
count
>=
2
&&
(
(
u64
)
to
&
1
)
==
((
u64
)
from
&
1
))
{
count
-=
2
;
do
{
__raw_writew
(
*
(
const
u16
*
)
from
,
to
);
...
...
@@ -509,27 +520,30 @@ void _memcpy_toio(unsigned long to, const void * from, long count)
mb
();
}
EXPORT_SYMBOL
(
memcpy_toio
);
/*
* "memset" on IO memory space.
*/
void
_memset_c_io
(
unsigned
long
to
,
unsigned
long
c
,
long
count
)
void
_memset_c_io
(
volatile
void
__iomem
*
to
,
unsigned
long
c
,
long
count
)
{
/* Handle any initial odd byte */
if
(
count
>
0
&&
(
to
&
1
))
{
if
(
count
>
0
&&
(
(
u64
)
to
&
1
))
{
__raw_writeb
(
c
,
to
);
to
++
;
count
--
;
}
/* Handle any initial odd halfword */
if
(
count
>=
2
&&
(
to
&
2
))
{
if
(
count
>=
2
&&
(
(
u64
)
to
&
2
))
{
__raw_writew
(
c
,
to
);
to
+=
2
;
count
-=
2
;
}
/* Handle any initial odd word */
if
(
count
>=
4
&&
(
to
&
4
))
{
if
(
count
>=
4
&&
(
(
u64
)
to
&
4
))
{
__raw_writel
(
c
,
to
);
to
+=
4
;
count
-=
4
;
...
...
@@ -568,27 +582,50 @@ void _memset_c_io(unsigned long to, unsigned long c, long count)
mb
();
}
EXPORT_SYMBOL
(
_memset_c_io
);
/* A version of memcpy used by the vga console routines to move data around
arbitrarily between screen and main memory. */
void
scr_memcpyw
(
u16
*
d
,
const
u16
*
s
,
unsigned
int
count
)
{
if
(
!
__is_ioaddr
((
unsigned
long
)
s
))
{
/* Source is memory. */
if
(
!
__is_ioaddr
((
unsigned
long
)
d
))
memcpy
(
d
,
s
,
count
);
else
memcpy_toio
(
d
,
s
,
count
);
}
else
{
/* Source is screen. */
if
(
!
__is_ioaddr
((
unsigned
long
)
d
))
memcpy_fromio
(
d
,
s
,
count
);
else
{
const
u16
__iomem
*
ios
=
(
const
u16
__iomem
*
)
s
;
u16
__iomem
*
iod
=
(
u16
__iomem
*
)
d
;
int
s_isio
=
__is_ioaddr
(
s
);
int
d_isio
=
__is_ioaddr
(
d
);
if
(
s_isio
)
{
if
(
d_isio
)
{
/* FIXME: Should handle unaligned ops and
operation widening. */
count
/=
2
;
while
(
count
--
)
{
u16
tmp
=
__raw_readw
(
(
unsigned
long
)(
s
++
)
);
__raw_writew
(
tmp
,
(
unsigned
long
)(
d
++
)
);
u16
tmp
=
__raw_readw
(
ios
++
);
__raw_writew
(
tmp
,
iod
++
);
}
}
else
memcpy_fromio
(
d
,
ios
,
count
);
}
else
{
if
(
d_isio
)
memcpy_toio
(
iod
,
s
,
count
);
else
memcpy
(
d
,
s
,
count
);
}
}
EXPORT_SYMBOL
(
scr_memcpyw
);
void
__iomem
*
ioport_map
(
unsigned
long
port
,
unsigned
int
size
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
ioportmap
)
(
port
);
}
void
ioport_unmap
(
void
__iomem
*
addr
)
{
}
EXPORT_SYMBOL
(
ioport_map
);
EXPORT_SYMBOL
(
ioport_unmap
);
arch/alpha/mm/fault.c
View file @
17647b1d
...
...
@@ -98,7 +98,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
by ignoring such an instruction. */
if
(
cause
==
0
)
{
unsigned
int
insn
;
__get_user
(
insn
,
(
unsigned
int
*
)
regs
->
pc
);
__get_user
(
insn
,
(
unsigned
int
__user
*
)
regs
->
pc
);
if
((
insn
>>
21
&
0x1f
)
==
0x1f
&&
/* ldq ldl ldt lds ldg ldf ldwu ldbu */
(
1ul
<<
(
insn
>>
26
)
&
0x30f00001400ul
))
{
...
...
include/asm-alpha/compiler.h
View file @
17647b1d
...
...
@@ -90,4 +90,14 @@
__asm__("stw %1,%0" : "=m"(mem) : "r"(val))
#endif
/* Some idiots over in <linux/compiler.h> thought inline should imply
always_inline. This breaks stuff. We'll include this file whenever
we run into such problems. */
#include <linux/compiler.h>
#undef inline
#undef __inline__
#undef __inline
#endif
/* __ALPHA_COMPILER_H */
include/asm-alpha/core_apecs.h
View file @
17647b1d
...
...
@@ -370,178 +370,142 @@ struct el_apecs_procdata
* data to/from the right byte-lanes.
*/
#define vip volatile int *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
apecs_inb
(
unsigned
long
addr
)
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
#define APECS_SET_HAE \
do { \
if (addr >= (1UL << 24)) { \
unsigned long msb = addr & 0xf8000000; \
addr -= msb; \
set_hae(msb); \
} \
} while (0)
__EXTERN_INLINE
unsigned
int
apecs_ioread8
(
void
__iomem
*
xaddr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_IO
+
0x00
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x00
;
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
apecs_
outb
(
u8
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
apecs_
iowrite8
(
u8
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
w
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x00
;
}
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x00
)
=
w
;
mb
();
}
__EXTERN_INLINE
u16
apecs_inw
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_IO
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
void
apecs_outw
(
u16
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x08
)
=
w
;
mb
();
}
__EXTERN_INLINE
u32
apecs_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x18
);
}
__EXTERN_INLINE
void
apecs_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x18
)
=
b
;
mb
();
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
__EXTERN_INLINE
u8
apecs_readb
(
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
apecs_ioread16
(
void
__iomem
*
xaddr
)
{
unsigned
long
result
,
msb
;
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x08
;
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u16
apecs_readw
(
unsigned
long
addr
)
{
unsigned
long
result
,
msb
;
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x08
);
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u32
apecs_readl
(
unsigned
long
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
}
__EXTERN_INLINE
u64
apecs_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
apecs_writeb
(
u8
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
apecs_iowrite16
(
u16
b
,
void
__iomem
*
xaddr
)
{
unsigned
long
msb
;
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x08
;
}
*
(
vuip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x00
)
=
b
*
0x01010101
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
void
apecs_writew
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
apecs_ioread32
(
void
__iomem
*
x
addr
)
{
unsigned
long
msb
;
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
}
*
(
vuip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x08
)
=
b
*
0x00010001
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
APECS_DENSE_MEM
)
addr
=
((
addr
-
APECS_IO
)
<<
5
)
+
APECS_IO
+
0x18
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
apecs_
writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
apecs_
iowrite32
(
u32
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
APECS_DENSE_MEM
)
addr
=
((
addr
-
APECS_IO
)
<<
5
)
+
APECS_IO
+
0x18
;
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
apecs_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
apecs_ioportmap
(
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
return
(
void
__iomem
*
)(
addr
+
APECS_IO
)
;
}
__EXTERN_INLINE
unsigned
long
apecs_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
apecs_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
addr
+
APECS_DENSE_MEM
;
return
(
void
__iomem
*
)(
addr
+
APECS_DENSE_MEM
)
;
}
__EXTERN_INLINE
void
apecs_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
int
apecs_is_ioaddr
(
unsigned
long
addr
)
{
return
;
return
addr
>=
IDENT_ADDR
+
0x180000000UL
;
}
__EXTERN_INLINE
int
apecs_is_
ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
int
apecs_is_
mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
addr
>=
IDENT_ADDR
+
0x180000000UL
;
return
(
unsigned
long
)
addr
>=
APECS_DENSE_MEM
;
}
#undef APECS_SET_HAE
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) apecs_inb((unsigned long)(p))
#define __inw(p) apecs_inw((unsigned long)(p))
#define __inl(p) apecs_inl((unsigned long)(p))
#define __outb(x,p) apecs_outb((x),(unsigned long)(p))
#define __outw(x,p) apecs_outw((x),(unsigned long)(p))
#define __outl(x,p) apecs_outl((x),(unsigned long)(p))
#define __readb(a) apecs_readb((unsigned long)(a))
#define __readw(a) apecs_readw((unsigned long)(a))
#define __readl(a) apecs_readl((unsigned long)(a))
#define __readq(a) apecs_readq((unsigned long)(a))
#define __writeb(x,a) apecs_writeb((x),(unsigned long)(a))
#define __writew(x,a) apecs_writew((x),(unsigned long)(a))
#define __writel(x,a) apecs_writel((x),(unsigned long)(a))
#define __writeq(x,a) apecs_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) apecs_ioremap((unsigned long)(a),(s))
#define __iounmap(a) apecs_iounmap((unsigned long)(a))
#define __is_ioaddr(a) apecs_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX apecs
#define apecs_trivial_io_bw 0
#define apecs_trivial_io_lq 0
#define apecs_trivial_rw_bw 2
#define apecs_trivial_rw_lq 1
#define apecs_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_cia.h
View file @
17647b1d
...
...
@@ -306,90 +306,6 @@ struct el_CIA_sysdata_mcheck {
* get at PCI memory and I/O.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vip volatile int *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
cia_inb
(
unsigned
long
addr
)
{
long
result
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
CIA_IO
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
cia_outb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
CIA_IO
+
0x00
)
=
w
;
mb
();
}
__EXTERN_INLINE
u16
cia_inw
(
unsigned
long
addr
)
{
long
result
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
CIA_IO
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
cia_outw
(
u16
b
,
unsigned
long
addr
)
{
unsigned
long
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
CIA_IO
+
0x08
)
=
w
;
mb
();
}
__EXTERN_INLINE
u32
cia_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
((
addr
<<
5
)
+
CIA_IO
+
0x18
);
}
__EXTERN_INLINE
void
cia_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
((
addr
<<
5
)
+
CIA_IO
+
0x18
)
=
b
;
mb
();
}
__EXTERN_INLINE
u8
cia_bwx_inb
(
unsigned
long
addr
)
{
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
CIA BWX I/O region, but that doesn't take care of legacy
ISA crap. */
return
__kernel_ldbu
(
*
(
vucp
)(
addr
+
CIA_BW_IO
));
}
__EXTERN_INLINE
void
cia_bwx_outb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)(
addr
+
CIA_BW_IO
));
mb
();
}
__EXTERN_INLINE
u16
cia_bwx_inw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)(
addr
+
CIA_BW_IO
));
}
__EXTERN_INLINE
void
cia_bwx_outw
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)(
addr
+
CIA_BW_IO
));
mb
();
}
__EXTERN_INLINE
u32
cia_bwx_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)(
addr
+
CIA_BW_IO
);
}
__EXTERN_INLINE
void
cia_bwx_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)(
addr
+
CIA_BW_IO
)
=
b
;
mb
();
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
...
...
@@ -422,195 +338,158 @@ __EXTERN_INLINE void cia_bwx_outl(u32 b, unsigned long addr)
*
*/
__EXTERN_INLINE
u8
cia_readb
(
unsigned
long
addr
)
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE
unsigned
int
cia_ioread8
(
void
__iomem
*
xaddr
)
{
unsigned
long
result
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
/* We can use CIA_MEM_R1_MASK for io ports too, since it is large
enough to cover all io ports, and smaller than CIA_IO. */
addr
&=
CIA_MEM_R1_MASK
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
CIA_SPARSE_MEM
+
0x00
);
if
(
addr
>=
CIA_DENSE_MEM
)
base_and_type
=
CIA_SPARSE_MEM
+
0x00
;
else
base_and_type
=
CIA_IO
+
0x00
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u16
cia_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
void
cia_iowrite8
(
u8
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
result
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
addr
&=
CIA_MEM_R1_MASK
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
CIA_SPARSE_MEM
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
if
(
addr
>=
CIA_DENSE_MEM
)
base_and_type
=
CIA_SPARSE_MEM
+
0x00
;
else
base_and_type
=
CIA_IO
+
0x00
;
__EXTERN_INLINE
void
cia_writeb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
addr
&=
CIA_MEM_R1_MASK
;
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
CIA_SPARSE_MEM
+
0x00
)
=
w
;
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
void
cia_writew
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
cia_ioread16
(
void
__iomem
*
x
addr
)
{
unsigned
long
w
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
addr
&=
CIA_MEM_R1_MASK
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
CIA_SPARSE_MEM
+
0x08
)
=
w
;
}
__EXTERN_INLINE
u32
cia_readl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
u64
cia_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
cia_writel
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
}
if
(
addr
>=
CIA_DENSE_MEM
)
base_and_type
=
CIA_SPARSE_MEM
+
0x08
;
else
base_and_type
=
CIA_IO
+
0x08
;
__EXTERN_INLINE
void
cia_writeq
(
u64
b
,
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
unsigned
long
cia_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
cia_iowrite16
(
u16
b
,
void
__iomem
*
xaddr
)
{
return
addr
+
CIA_DENSE_MEM
;
}
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
__EXTERN_INLINE
void
cia_iounmap
(
unsigned
long
addr
)
{
return
;
}
addr
&=
CIA_MEM_R1_MASK
;
if
(
addr
>=
CIA_DENSE_MEM
)
base_and_type
=
CIA_SPARSE_MEM
+
0x08
;
else
base_and_type
=
CIA_IO
+
0x08
;
__EXTERN_INLINE
u8
cia_bwx_readb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
u
16
cia_bwx_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
u
nsigned
int
cia_ioread32
(
void
__iomem
*
x
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
CIA_DENSE_MEM
)
addr
=
((
addr
-
CIA_IO
)
<<
5
)
+
CIA_IO
+
0x18
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
u32
cia_bwx_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
void
cia_iowrite32
(
u32
b
,
void
__iomem
*
x
addr
)
{
return
*
(
vuip
)
addr
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
CIA_DENSE_MEM
)
addr
=
((
addr
-
CIA_IO
)
<<
5
)
+
CIA_IO
+
0x18
;
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
u64
cia_bwx_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
cia_ioportmap
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
return
(
void
__iomem
*
)(
addr
+
CIA_IO
)
;
}
__EXTERN_INLINE
void
cia_bwx_writeb
(
u8
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
cia_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
return
(
void
__iomem
*
)(
addr
+
CIA_DENSE_MEM
);
}
__EXTERN_INLINE
void
cia_bwx_writew
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
cia_is_ioaddr
(
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
)
;
return
addr
>=
IDENT_ADDR
+
0x8000000000UL
;
}
__EXTERN_INLINE
void
cia_bwx_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
cia_is_mmio
(
const
volatile
void
__iomem
*
addr
)
{
*
(
vuip
)
addr
=
b
;
return
(
unsigned
long
)
addr
>=
CIA_DENSE_MEM
;
}
__EXTERN_INLINE
void
cia_bwx_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
cia_bwx_ioportmap
(
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
return
(
void
__iomem
*
)(
addr
+
CIA_BW_IO
)
;
}
__EXTERN_INLINE
unsigned
long
cia_bwx_ioremap
(
unsigned
long
addr
,
__EXTERN_INLINE
void
__iomem
*
cia_bwx_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
addr
+
CIA_BW_MEM
;
return
(
void
__iomem
*
)(
addr
+
CIA_BW_MEM
)
;
}
__EXTERN_INLINE
void
cia_bwx_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
int
cia_bwx_is_ioaddr
(
unsigned
long
addr
)
{
return
;
return
addr
>=
IDENT_ADDR
+
0x8000000000UL
;
}
__EXTERN_INLINE
int
cia_
is_ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
int
cia_
bwx_is_mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
addr
>=
IDENT_ADDR
+
0x8000000000UL
;
return
(
unsigned
long
)
addr
<
CIA_BW_IO
;
}
#undef vucp
#undef vusp
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#undef __IO_PREFIX
#define __IO_PREFIX cia
#define cia_trivial_rw_bw 2
#define cia_trivial_rw_lq 1
#define cia_trivial_io_bw 0
#define cia_trivial_io_lq 0
#define cia_trivial_iounmap 1
#include <asm/io_trivial.h>
#undef __IO_PREFIX
#define __IO_PREFIX cia_bwx
#define cia_bwx_trivial_rw_bw 1
#define cia_bwx_trivial_rw_lq 1
#define cia_bwx_trivial_io_bw 1
#define cia_bwx_trivial_io_lq 1
#define cia_bwx_trivial_iounmap 1
#include <asm/io_trivial.h>
#undef __IO_PREFIX
#ifdef CONFIG_ALPHA_PYXIS
# define __inb(p) cia_bwx_inb((unsigned long)(p))
# define __inw(p) cia_bwx_inw((unsigned long)(p))
# define __inl(p) cia_bwx_inl((unsigned long)(p))
# define __outb(x,p) cia_bwx_outb((x),(unsigned long)(p))
# define __outw(x,p) cia_bwx_outw((x),(unsigned long)(p))
# define __outl(x,p) cia_bwx_outl((x),(unsigned long)(p))
# define __readb(a) cia_bwx_readb((unsigned long)(a))
# define __readw(a) cia_bwx_readw((unsigned long)(a))
# define __readl(a) cia_bwx_readl((unsigned long)(a))
# define __readq(a) cia_bwx_readq((unsigned long)(a))
# define __writeb(x,a) cia_bwx_writeb((x),(unsigned long)(a))
# define __writew(x,a) cia_bwx_writew((x),(unsigned long)(a))
# define __writel(x,a) cia_bwx_writel((x),(unsigned long)(a))
# define __writeq(x,a) cia_bwx_writeq((x),(unsigned long)(a))
# define __ioremap(a,s) cia_bwx_ioremap((unsigned long)(a),(s))
# define __iounmap(a) cia_bwx_iounmap((unsigned long)(a))
# define inb(p) __inb(p)
# define inw(p) __inw(p)
# define inl(p) __inl(p)
# define outb(x,p) __outb((x),(p))
# define outw(x,p) __outw((x),(p))
# define outl(x,p) __outl((x),(p))
# define __raw_readb(a) __readb(a)
# define __raw_readw(a) __readw(a)
# define __raw_readl(a) __readl(a)
# define __raw_readq(a) __readq(a)
# define __raw_writeb(x,a) __writeb((x),(a))
# define __raw_writew(x,a) __writew((x),(a))
# define __raw_writel(x,a) __writel((x),(a))
# define __raw_writeq(x,a) __writeq((x),(a))
#define __IO_PREFIX cia_bwx
#else
# define __inb(p) cia_inb((unsigned long)(p))
# define __inw(p) cia_inw((unsigned long)(p))
# define __inl(p) cia_inl((unsigned long)(p))
# define __outb(x,p) cia_outb((x),(unsigned long)(p))
# define __outw(x,p) cia_outw((x),(unsigned long)(p))
# define __outl(x,p) cia_outl((x),(unsigned long)(p))
# define __readb(a) cia_readb((unsigned long)(a))
# define __readw(a) cia_readw((unsigned long)(a))
# define __readl(a) cia_readl((unsigned long)(a))
# define __readq(a) cia_readq((unsigned long)(a))
# define __writeb(x,a) cia_writeb((x),(unsigned long)(a))
# define __writew(x,a) cia_writew((x),(unsigned long)(a))
# define __writel(x,a) cia_writel((x),(unsigned long)(a))
# define __writeq(x,a) cia_writeq((x),(unsigned long)(a))
# define __ioremap(a,s) cia_ioremap((unsigned long)(a),(s))
# define __iounmap(a) cia_iounmap((unsigned long)(a))
# define __raw_readl(a) __readl(a)
# define __raw_readq(a) __readq(a)
# define __raw_writel(v,a) __writel((v),(a))
# define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* PYXIS */
#define __is_ioaddr(a) cia_is_ioaddr((unsigned long)(a))
#endif
/* __WANT_IO_DEF */
#define __IO_PREFIX cia
#endif
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_irongate.h
View file @
17647b1d
...
...
@@ -190,137 +190,37 @@ struct el_IRONGATE_sysdata_mcheck {
* K7 can only use linear accesses to get at PCI memory and I/O spaces.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
irongate_inb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)(
addr
+
IRONGATE_IO
));
}
__EXTERN_INLINE
void
irongate_outb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)(
addr
+
IRONGATE_IO
));
mb
();
}
__EXTERN_INLINE
u16
irongate_inw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)(
addr
+
IRONGATE_IO
));
}
__EXTERN_INLINE
void
irongate_outw
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)(
addr
+
IRONGATE_IO
));
mb
();
}
__EXTERN_INLINE
u32
irongate_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)(
addr
+
IRONGATE_IO
);
}
__EXTERN_INLINE
void
irongate_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)(
addr
+
IRONGATE_IO
)
=
b
;
mb
();
}
/*
* Memory functions. All accesses are done through linear space.
*/
__EXTERN_INLINE
u8
irongate_readb
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
irongate_ioportmap
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
return
(
void
__iomem
*
)(
addr
+
IRONGATE_IO
);
}
__EXTERN_INLINE
u16
irongate_readw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
extern
void
__iomem
*
irongate_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
irongate_iounmap
(
volatile
void
__iomem
*
addr
);
__EXTERN_INLINE
u32
irongate_readl
(
unsigned
long
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
}
__EXTERN_INLINE
u64
irongate_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
irongate_writeb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
irongate_writew
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
irongate_writel
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
irongate_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
irongate_is_ioaddr
(
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
return
addr
>=
IRONGATE_MEM
;
}
extern
unsigned
long
irongate_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
irongate_iounmap
(
unsigned
long
addr
);
__EXTERN_INLINE
int
irongate_is_ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
int
irongate_is_mmio
(
const
volatile
void
__iomem
*
xaddr
)
{
return
addr
>=
IRONGATE_MEM
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
return
addr
<
IRONGATE_IO
||
addr
>=
IRONGATE_CONF
;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) irongate_inb((unsigned long)(p))
#define __inw(p) irongate_inw((unsigned long)(p))
#define __inl(p) irongate_inl((unsigned long)(p))
#define __outb(x,p) irongate_outb((x),(unsigned long)(p))
#define __outw(x,p) irongate_outw((x),(unsigned long)(p))
#define __outl(x,p) irongate_outl((x),(unsigned long)(p))
#define __readb(a) irongate_readb((unsigned long)(a))
#define __readw(a) irongate_readw((unsigned long)(a))
#define __readl(a) irongate_readl((unsigned long)(a))
#define __readq(a) irongate_readq((unsigned long)(a))
#define __writeb(x,a) irongate_writeb((x),(unsigned long)(a))
#define __writew(x,a) irongate_writew((x),(unsigned long)(a))
#define __writel(x,a) irongate_writel((x),(unsigned long)(a))
#define __writeq(x,a) irongate_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) irongate_ioremap((unsigned long)(a),(s))
#define __iounmap(a) irongate_iounmap((unsigned long)(a))
#define __is_ioaddr(a) irongate_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb((x),(p))
#define outw(x,p) __outw((x),(p))
#define outl(x,p) __outl((x),(p))
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb((v),(a))
#define __raw_writew(v,a) __writew((v),(a))
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX irongate
#define irongate_trivial_rw_bw 1
#define irongate_trivial_rw_lq 1
#define irongate_trivial_io_bw 1
#define irongate_trivial_io_lq 1
#define irongate_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_lca.h
View file @
17647b1d
...
...
@@ -215,145 +215,117 @@ union el_lca {
* data to/from the right byte-lanes.
*/
#define vip volatile int *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
#define vip volatile int
__force
*
#define vuip volatile unsigned int
__force
*
#define vulp volatile unsigned long
__force
*
__EXTERN_INLINE
u8
lca_inb
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
LCA_IO
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
#define LCA_SET_HAE \
do { \
if (addr >= (1UL << 24)) { \
unsigned long msb = addr & 0xf8000000; \
addr -= msb; \
set_hae(msb); \
} \
} while (0)
__EXTERN_INLINE
void
lca_outb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
LCA_IO
+
0x00
)
=
w
;
mb
();
}
__EXTERN_INLINE
u16
lca_inw
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
LCA_IO
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
lca_outw
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
lca_ioread8
(
void
__iomem
*
x
addr
)
{
unsigned
long
w
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
if
(
addr
>=
LCA_DENSE_MEM
)
{
addr
-=
LCA_DENSE_MEM
;
LCA_SET_HAE
;
base_and_type
=
LCA_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
LCA_IO
;
base_and_type
=
LCA_IO
+
0x00
;
}
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
LCA_IO
+
0x08
)
=
w
;
mb
();
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u32
lca_inl
(
unsigned
long
addr
)
__EXTERN_INLINE
void
lca_iowrite8
(
u8
b
,
void
__iomem
*
x
addr
)
{
return
*
(
vuip
)
((
addr
<<
5
)
+
LCA_IO
+
0x18
);
}
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
LCA_DENSE_MEM
)
{
addr
-=
LCA_DENSE_MEM
;
LCA_SET_HAE
;
base_and_type
=
LCA_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
LCA_IO
;
base_and_type
=
LCA_IO
+
0x00
;
}
__EXTERN_INLINE
void
lca_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
((
addr
<<
5
)
+
LCA_IO
+
0x18
)
=
b
;
mb
();
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
__EXTERN_INLINE
u8
lca_readb
(
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
lca_ioread16
(
void
__iomem
*
xaddr
)
{
unsigned
long
result
,
msb
;
addr
-=
LCA_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
if
(
addr
>=
LCA_DENSE_MEM
)
{
addr
-=
LCA_DENSE_MEM
;
LCA_SET_HAE
;
base_and_type
=
LCA_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
LCA_IO
;
base_and_type
=
LCA_IO
+
0x08
;
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
LCA_SPARSE_MEM
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u16
lca_readw
(
unsigned
long
addr
)
{
unsigned
long
result
,
msb
;
addr
-=
LCA_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
LCA_SPARSE_MEM
+
0x08
);
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u32
lca_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
void
lca_iowrite16
(
u16
b
,
void
__iomem
*
x
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
}
__EXTERN_INLINE
u64
lca_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
lca_writeb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
msb
;
unsigned
long
w
;
addr
-=
LCA_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
LCA_DENSE_MEM
)
{
addr
-=
LCA_DENSE_MEM
;
LCA_SET_HAE
;
base_and_type
=
LCA_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
LCA_IO
;
base_and_type
=
LCA_IO
+
0x08
;
}
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
LCA_SPARSE_MEM
+
0x00
)
=
w
;
}
__EXTERN_INLINE
void
lca_writew
(
u16
b
,
unsigned
long
addr
)
{
unsigned
long
msb
;
unsigned
long
w
;
addr
-=
LCA_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
}
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
LCA_SPARSE_MEM
+
0x08
)
=
w
;
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
void
lca_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
lca_ioread32
(
void
__iomem
*
x
addr
)
{
*
(
vuip
)
addr
=
b
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
LCA_DENSE_MEM
)
addr
=
((
addr
-
LCA_IO
)
<<
5
)
+
LCA_IO
+
0x18
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
lca_
writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
lca_
iowrite32
(
u32
b
,
void
__iomem
*
x
addr
)
{
*
(
vulp
)
addr
=
b
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
LCA_DENSE_MEM
)
addr
=
((
addr
-
LCA_IO
)
<<
5
)
+
LCA_IO
+
0x18
;
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
unsigned
long
lca_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
lca_ioportmap
(
unsigned
long
addr
)
{
return
addr
+
LCA_DENSE_MEM
;
return
(
void
__iomem
*
)(
addr
+
LCA_IO
)
;
}
__EXTERN_INLINE
void
lca_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
lca_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
LCA_DENSE_MEM
)
;
}
__EXTERN_INLINE
int
lca_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -361,36 +333,23 @@ __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
return
addr
>=
IDENT_ADDR
+
0x120000000UL
;
}
/*
 * Distinguish MMIO cookies from port-I/O cookies on LCA: any cookie
 * at or above LCA_DENSE_MEM refers to a memory-space mapping, while
 * lower cookies came from the port-I/O window.
 */
__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= LCA_DENSE_MEM;
}
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) lca_inb((unsigned long)(p))
#define __inw(p) lca_inw((unsigned long)(p))
#define __inl(p) lca_inl((unsigned long)(p))
#define __outb(x,p) lca_outb((x),(unsigned long)(p))
#define __outw(x,p) lca_outw((x),(unsigned long)(p))
#define __outl(x,p) lca_outl((x),(unsigned long)(p))
#define __readb(a) lca_readb((unsigned long)(a))
#define __readw(a) lca_readw((unsigned long)(a))
#define __readl(a) lca_readl((unsigned long)(a))
#define __readq(a) lca_readq((unsigned long)(a))
#define __writeb(x,a) lca_writeb((x),(unsigned long)(a))
#define __writew(x,a) lca_writew((x),(unsigned long)(a))
#define __writel(x,a) lca_writel((x),(unsigned long)(a))
#define __writeq(x,a) lca_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) lca_ioremap((unsigned long)(a),(s))
#define __iounmap(a) lca_iounmap((unsigned long)(a))
#define __is_ioaddr(a) lca_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX lca
#define lca_trivial_rw_bw 2
#define lca_trivial_rw_lq 1
#define lca_trivial_io_bw 0
#define lca_trivial_io_lq 0
#define lca_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_marvel.h
View file @
17647b1d
...
...
@@ -325,249 +325,48 @@ struct io7 {
* I/O functions. All access through linear space.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
#ifdef CONFIG_VGA_HOSE
extern
struct
pci_controller
*
pci_vga_hose
;
# define __marvel_is_port_vga(a) \
(((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
# define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000))
# define FIXUP_IOADDR_VGA(a) do { \
if (pci_vga_hose && __marvel_is_port_vga(a)) \
a += pci_vga_hose->io_space->start; \
} while(0)
#else
# define FIXUP_IOADDR_VGA(a)
#endif
#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71))
#define FIXUP_IOADDR_LEGACY(a)
#define FIXUP_IOADDR(a) do { \
FIXUP_IOADDR_VGA(a); \
FIXUP_IOADDR_LEGACY(a); \
} while(0)
#if 0
# define IOBUG(x) printk x
# define IOBUG_FILTER_IOADDR(a, x) \
if (!__marvel_is_port_kbd(a) && !__marvel_is_port_rtc(a)) IOBUG(x)
#else
# define IOBUG(x)
# define IOBUG_FILTER_IOADDR(a, x)
#endif
extern
u8
__marvel_rtc_io
(
int
write
,
u8
b
,
unsigned
long
addr
);
#define __marvel_rtc_inb(a) __marvel_rtc_io(0, 0, (a))
#define __marvel_rtc_outb(b, a) __marvel_rtc_io(1, (b), (a))
__EXTERN_INLINE
int
marvel_is_ioaddr
(
unsigned
long
addr
)
{
return
(
addr
&
(
1UL
<<
40
))
!=
0
;
/*FIXME - hardwire*/
}
__EXTERN_INLINE
u8
marvel_inb
(
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
if
(
__marvel_is_port_kbd
(
addr
))
return
(
u8
)
0
;
if
(
__marvel_is_port_rtc
(
addr
))
return
__marvel_rtc_inb
(
addr
);
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
(
u8
)
-
1
;
}
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
marvel_outb
(
u8
b
,
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
if
(
__marvel_is_port_rtc
(
addr
))
return
(
void
)
__marvel_rtc_outb
(
b
,
addr
);
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
;
}
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
mb
();
}
__EXTERN_INLINE
u16
marvel_inw
(
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
(
u16
)
-
1
;
}
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
marvel_outw
(
u16
w
,
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
;
}
__kernel_stw
(
w
,
*
(
vusp
)
addr
);
mb
();
}
__EXTERN_INLINE
u32
marvel_inl
(
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
(
u32
)
-
1
;
}
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
marvel_outl
(
u32
l
,
unsigned
long
addr
)
{
FIXUP_IOADDR
(
addr
);
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG_FILTER_IOADDR
(
addr
,
(
"Bad IO addr %lx - reading -1
\n
"
,
addr
));
return
;
}
*
(
vuip
)
addr
=
l
;
mb
();
}
/*
* Memory functions. All accesses through linear space.
*/
extern
unsigned
long
marvel_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
marvel_iounmap
(
unsigned
long
addr
);
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
__EXTERN_INLINE
u8
marvel_readb
(
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - reading -1
\n
"
,
addr
));
return
(
u8
)
-
1
;
}
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
extern
unsigned
int
marvel_ioread8
(
void
__iomem
*
);
extern
void
marvel_iowrite8
(
u8
b
,
void
__iomem
*
);
__EXTERN_INLINE
u
16
marvel_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
u
nsigned
int
marvel_ioread16
(
void
__iomem
*
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - reading -1
\n
"
,
addr
));
return
(
u16
)
-
1
;
}
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
u32
marvel_readl
(
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - reading -1
\n
"
,
addr
));
return
(
u32
)
-
1
;
}
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
u64
marvel_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
void
marvel_iowrite16
(
u16
b
,
void
__iomem
*
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - reading -1
\n
"
,
addr
));
return
(
u64
)
-
1
;
}
return
*
(
vulp
)
addr
;
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
marvel_writeb
(
u8
b
,
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - dropping store
\n
"
,
addr
));
return
;
}
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
marvel_writew
(
u16
w
,
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - dropping store
\n
"
,
addr
));
return
;
}
__kernel_stw
(
w
,
*
(
vusp
)
addr
);
}
extern
void
__iomem
*
marvel_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
marvel_iounmap
(
volatile
void
__iomem
*
addr
);
extern
void
__iomem
*
marvel_ioportmap
(
unsigned
long
addr
);
__EXTERN_INLINE
void
marvel_writel
(
u32
l
,
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - dropping store
\n
"
,
addr
));
return
;
}
*
(
vuip
)
addr
=
l
;
}
__EXTERN_INLINE
void
marvel_writeq
(
u64
q
,
unsigned
long
addr
)
__EXTERN_INLINE
int
marvel_is_ioaddr
(
unsigned
long
addr
)
{
if
(
!
marvel_is_ioaddr
(
addr
))
{
IOBUG
((
"Bad MEM addr %lx - dropping store
\n
"
,
addr
));
return
;
}
*
(
vulp
)
addr
=
q
;
return
(
addr
>>
40
)
&
1
;
}
#undef FIXUP_IOADDR
#undef FIXUP_IOADDR_LEGACY
#undef FIXUP_IOADDR_VGA
extern
int
marvel_is_mmio
(
const
volatile
void
__iomem
*
);
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) marvel_inb((unsigned long)(p))
#define __inw(p) marvel_inw((unsigned long)(p))
#define __inl(p) marvel_inl((unsigned long)(p))
#define __outb(x,p) marvel_outb((x),(unsigned long)(p))
#define __outw(x,p) marvel_outw((x),(unsigned long)(p))
#define __outl(x,p) marvel_outl((x),(unsigned long)(p))
#define __readb(a) marvel_readb((unsigned long)(a))
#define __readw(a) marvel_readw((unsigned long)(a))
#define __readl(a) marvel_readl((unsigned long)(a))
#define __readq(a) marvel_readq((unsigned long)(a))
#define __writeb(x,a) marvel_writeb((x),(unsigned long)(a))
#define __writew(x,a) marvel_writew((x),(unsigned long)(a))
#define __writel(x,a) marvel_writel((x),(unsigned long)(a))
#define __writeq(x,a) marvel_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) marvel_ioremap((unsigned long)(a),(s))
#define __iounmap(a) marvel_iounmap((unsigned long)(a))
#define __is_ioaddr(a) marvel_is_ioaddr((unsigned long)(a))
/* Disable direct inlining of these calls with the debug checks present. */
#if 0
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX marvel
#define marvel_trivial_rw_bw 1
#define marvel_trivial_rw_lq 1
#define marvel_trivial_io_bw 0
#define marvel_trivial_io_lq 1
#define marvel_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
# undef __EXTERN_INLINE
...
...
include/asm-alpha/core_mcpcia.h
View file @
17647b1d
...
...
@@ -211,91 +211,6 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
* Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vip volatile int *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
mcpcia_inb
(
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
,
result
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
correct hose's I/O region, but that doesn't take care of
legacy ISA crap. */
hose
+=
MCPCIA_IO_BIAS
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
hose
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
mcpcia_outb
(
u8
b
,
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
,
w
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
hose
+=
MCPCIA_IO_BIAS
;
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x00
)
=
w
;
mb
();
}
__EXTERN_INLINE
u16
mcpcia_inw
(
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
,
result
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
hose
+=
MCPCIA_IO_BIAS
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
hose
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
mcpcia_outw
(
u16
b
,
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
,
w
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
hose
+=
MCPCIA_IO_BIAS
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x08
)
=
w
;
mb
();
}
__EXTERN_INLINE
u32
mcpcia_inl
(
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
hose
+=
MCPCIA_IO_BIAS
;
return
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x18
);
}
__EXTERN_INLINE
void
mcpcia_outl
(
u32
b
,
unsigned
long
in_addr
)
{
unsigned
long
addr
,
hose
;
addr
=
in_addr
&
0xffffUL
;
hose
=
in_addr
&
~
0xffffUL
;
hose
+=
MCPCIA_IO_BIAS
;
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x18
)
=
b
;
mb
();
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
...
...
@@ -328,149 +243,131 @@ __EXTERN_INLINE void mcpcia_outl(u32 b, unsigned long in_addr)
*
*/
__EXTERN_INLINE
unsigned
long
mcpcia_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
{
return
addr
+
MCPCIA_MEM_BIAS
;
}
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
set_hae(hose & 0xffffffff); \
hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \
}
#else
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \
}
#endif
__EXTERN_INLINE
void
mcpcia_iounmap
(
unsigned
long
addr
)
static
inline
int
__mcpcia_is_mmio
(
unsigned
long
addr
)
{
return
;
return
(
addr
&
0x80000000UL
)
==
0
;
}
__EXTERN_INLINE
int
mcpcia_is_ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
unsigned
int
mcpcia_ioread8
(
void
__iomem
*
x
addr
)
{
return
addr
>=
MCPCIA_SPARSE
(
0
);
unsigned
long
addr
=
(
unsigned
long
)
xaddr
&
MCPCIA_MEM_MASK
;
unsigned
long
hose
=
(
unsigned
long
)
xaddr
&
~
MCPCIA_MEM_MASK
;
unsigned
long
result
;
MCPCIA_FROB_MMIO
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
hose
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u8
mcpcia_readb
(
unsigned
long
in_
addr
)
__EXTERN_INLINE
void
mcpcia_iowrite8
(
u8
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
in_addr
&
0xffffffffUL
;
unsigned
long
hose
=
in_addr
&
~
0xffffffffUL
;
unsigned
long
result
,
work
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
&
MCPCIA_MEM_MASK
;
unsigned
long
hose
=
(
unsigned
long
)
xaddr
&
~
MCPCIA_MEM_MASK
;
unsigned
long
w
;
#ifndef MCPCIA_ONE_HAE_WINDOW
unsigned
long
msb
;
msb
=
addr
&
~
MCPCIA_MEM_MASK
;
set_hae
(
msb
);
#endif
addr
=
addr
&
MCPCIA_MEM_MASK
;
MCPCIA_FROB_MMIO
;
hose
=
hose
-
MCPCIA_DENSE
(
4
)
+
MCPCIA_SPARSE
(
4
);
work
=
((
addr
<<
5
)
+
hose
+
0x00
);
result
=
*
(
vip
)
work
;
return
__kernel_extbl
(
result
,
addr
&
3
);
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x00
)
=
w
;
}
__EXTERN_INLINE
u
16
mcpcia_readw
(
unsigned
long
in_
addr
)
__EXTERN_INLINE
u
nsigned
int
mcpcia_ioread16
(
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
in_addr
&
0xffffffffUL
;
unsigned
long
hose
=
in_addr
&
~
0xffffffffUL
;
unsigned
long
result
,
work
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
&
MCPCIA_MEM_MASK
;
unsigned
long
hose
=
(
unsigned
long
)
xaddr
&
~
MCPCIA_MEM_MASK
;
unsigned
long
result
;
#ifndef MCPCIA_ONE_HAE_WINDOW
unsigned
long
msb
;
msb
=
addr
&
~
MCPCIA_MEM_MASK
;
set_hae
(
msb
);
#endif
addr
=
addr
&
MCPCIA_MEM_MASK
;
MCPCIA_FROB_MMIO
;
hose
=
hose
-
MCPCIA_DENSE
(
4
)
+
MCPCIA_SPARSE
(
4
);
work
=
((
addr
<<
5
)
+
hose
+
0x08
);
result
=
*
(
vip
)
work
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
hose
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
mcpcia_
writeb
(
u8
b
,
unsigned
long
in_
addr
)
__EXTERN_INLINE
void
mcpcia_
iowrite16
(
u16
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
in_addr
&
0xffffffffUL
;
unsigned
long
hose
=
in_addr
&
~
0xffffffffUL
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
&
MCPCIA_MEM_MASK
;
unsigned
long
hose
=
(
unsigned
long
)
xaddr
&
~
MCPCIA_MEM_MASK
;
unsigned
long
w
;
#ifndef MCPCIA_ONE_HAE_WINDOW
unsigned
long
msb
;
msb
=
addr
&
~
MCPCIA_MEM_MASK
;
set_hae
(
msb
);
#endif
addr
=
addr
&
MCPCIA_MEM_MASK
;
MCPCIA_FROB_MMIO
;
w
=
__kernel_insbl
(
b
,
in_addr
&
3
);
hose
=
hose
-
MCPCIA_DENSE
(
4
)
+
MCPCIA_SPARSE
(
4
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x00
)
=
w
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x08
)
=
w
;
}
__EXTERN_INLINE
void
mcpcia_writew
(
u16
b
,
unsigned
long
in_
addr
)
__EXTERN_INLINE
unsigned
int
mcpcia_ioread32
(
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
in_addr
&
0xffffffffUL
;
unsigned
long
hose
=
in_addr
&
~
0xffffffffUL
;
unsigned
long
w
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
#ifndef MCPCIA_ONE_HAE_WINDOW
unsigned
long
msb
;
msb
=
addr
&
~
MCPCIA_MEM_MASK
;
set_hae
(
msb
);
#endif
addr
=
addr
&
MCPCIA_MEM_MASK
;
if
(
!
__mcpcia_is_mmio
(
addr
))
addr
=
((
addr
&
0xffff
)
<<
5
)
+
(
addr
&
~
0xfffful
)
+
0x18
;
w
=
__kernel_inswl
(
b
,
in_addr
&
3
);
hose
=
hose
-
MCPCIA_DENSE
(
4
)
+
MCPCIA_SPARSE
(
4
);
*
(
vuip
)
((
addr
<<
5
)
+
hose
+
0x08
)
=
w
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
mcpcia_iowrite32
(
u32
b
,
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
!
__mcpcia_is_mmio
(
addr
))
addr
=
((
addr
&
0xffff
)
<<
5
)
+
(
addr
&
~
0xfffful
)
+
0x18
;
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
u32
mcpcia_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
mcpcia_ioportmap
(
unsigned
long
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
return
(
void
__iomem
*
)(
addr
+
MCPCIA_IO_BIAS
)
;
}
__EXTERN_INLINE
u64
mcpcia_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
mcpcia_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
*
(
vulp
)
addr
;
return
(
void
__iomem
*
)(
addr
+
MCPCIA_MEM_BIAS
)
;
}
__EXTERN_INLINE
void
mcpcia_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
mcpcia_is_ioaddr
(
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
return
addr
>=
MCPCIA_SPARSE
(
0
)
;
}
__EXTERN_INLINE
void
mcpcia_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
mcpcia_is_mmio
(
const
volatile
void
__iomem
*
x
addr
)
{
*
(
vulp
)
addr
=
b
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
return
__mcpcia_is_mmio
(
addr
);
}
#undef
vucp
#undef vusp
#undef
MCPCIA_FROB_MMIO
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) mcpcia_inb((unsigned long)(p))
#define __inw(p) mcpcia_inw((unsigned long)(p))
#define __inl(p) mcpcia_inl((unsigned long)(p))
#define __outb(x,p) mcpcia_outb((x),(unsigned long)(p))
#define __outw(x,p) mcpcia_outw((x),(unsigned long)(p))
#define __outl(x,p) mcpcia_outl((x),(unsigned long)(p))
#define __readb(a) mcpcia_readb((unsigned long)(a))
#define __readw(a) mcpcia_readw((unsigned long)(a))
#define __readl(a) mcpcia_readl((unsigned long)(a))
#define __readq(a) mcpcia_readq((unsigned long)(a))
#define __writeb(x,a) mcpcia_writeb((x),(unsigned long)(a))
#define __writew(x,a) mcpcia_writew((x),(unsigned long)(a))
#define __writel(x,a) mcpcia_writel((x),(unsigned long)(a))
#define __writeq(x,a) mcpcia_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) mcpcia_ioremap((unsigned long)(a),(s))
#define __iounmap(a) mcpcia_iounmap((unsigned long)(a))
#define __is_ioaddr(a) mcpcia_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX mcpcia
#define mcpcia_trivial_rw_bw 2
#define mcpcia_trivial_rw_lq 1
#define mcpcia_trivial_io_bw 0
#define mcpcia_trivial_io_lq 0
#define mcpcia_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_polaris.h
View file @
17647b1d
...
...
@@ -63,49 +63,6 @@ struct el_POLARIS_sysdata_mcheck {
* However, we will support only the BWX form.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
polaris_inb
(
unsigned
long
addr
)
{
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
POLARIS I/O region, but that doesn't take care of legacy
ISA crap. */
return
__kernel_ldbu
(
*
(
vucp
)(
addr
+
POLARIS_DENSE_IO_BASE
));
}
__EXTERN_INLINE
void
polaris_outb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)(
addr
+
POLARIS_DENSE_IO_BASE
));
mb
();
}
__EXTERN_INLINE
u16
polaris_inw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)(
addr
+
POLARIS_DENSE_IO_BASE
));
}
__EXTERN_INLINE
void
polaris_outw
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)(
addr
+
POLARIS_DENSE_IO_BASE
));
mb
();
}
__EXTERN_INLINE
u32
polaris_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)(
addr
+
POLARIS_DENSE_IO_BASE
);
}
__EXTERN_INLINE
void
polaris_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)(
addr
+
POLARIS_DENSE_IO_BASE
)
=
b
;
mb
();
}
/*
* Memory functions. Polaris allows all accesses (byte/word
* as well as long/quad) to be done through dense space.
...
...
@@ -113,104 +70,35 @@ __EXTERN_INLINE void polaris_outl(u32 b, unsigned long addr)
* We will only support DENSE access via BWX insns.
*/
__EXTERN_INLINE
u8
polaris_readb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
u16
polaris_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
polaris_ioportmap
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
return
(
void
__iomem
*
)(
addr
+
POLARIS_DENSE_IO_BASE
);
}
__EXTERN_INLINE
u32
polaris_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
polaris_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
return
(
void
__iomem
*
)(
addr
+
POLARIS_DENSE_MEM_BASE
)
;
}
__EXTERN_INLINE
u64
polaris_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
polaris_writeb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
polaris_writew
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
polaris_writel
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
polaris_writeq
(
u64
b
,
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
}
__EXTERN_INLINE
unsigned
long
polaris_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
{
return
addr
+
POLARIS_DENSE_MEM_BASE
;
}
__EXTERN_INLINE
void
polaris_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
int
polaris_is_ioaddr
(
unsigned
long
addr
)
{
return
;
return
addr
>=
POLARIS_SPARSE_MEM_BASE
;
}
__EXTERN_INLINE
int
polaris_is_
ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
int
polaris_is_
mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
addr
>=
POLARIS_SPARSE_MEM
_BASE
;
return
(
unsigned
long
)
addr
<
POLARIS_SPARSE_IO
_BASE
;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) polaris_inb((unsigned long)(p))
#define __inw(p) polaris_inw((unsigned long)(p))
#define __inl(p) polaris_inl((unsigned long)(p))
#define __outb(x,p) polaris_outb((x),(unsigned long)(p))
#define __outw(x,p) polaris_outw((x),(unsigned long)(p))
#define __outl(x,p) polaris_outl((x),(unsigned long)(p))
#define __readb(a) polaris_readb((unsigned long)(a))
#define __readw(a) polaris_readw((unsigned long)(a))
#define __readl(a) polaris_readl((unsigned long)(a))
#define __readq(a) polaris_readq((unsigned long)(a))
#define __writeb(x,a) polaris_writeb((x),(unsigned long)(a))
#define __writew(x,a) polaris_writew((x),(unsigned long)(a))
#define __writel(x,a) polaris_writel((x),(unsigned long)(a))
#define __writeq(x,a) polaris_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) polaris_ioremap((unsigned long)(a),(s))
#define __iounmap(a) polaris_iounmap((unsigned long)(a))
#define __is_ioaddr(a) polaris_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb((x),(p))
#define outw(x,p) __outw((x),(p))
#define outl(x,p) __outl((x),(p))
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb((v),(a))
#define __raw_writew(v,a) __writew((v),(a))
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX polaris
#define polaris_trivial_rw_bw 1
#define polaris_trivial_rw_lq 1
#define polaris_trivial_io_bw 1
#define polaris_trivial_io_lq 1
#define polaris_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_t2.h
View file @
17647b1d
...
...
@@ -199,8 +199,8 @@ struct el_t2_procdata_mcheck {
struct
el_t2_logout_header
{
unsigned
int
elfl_size
;
/* size in bytes of logout area. */
int
elfl_sbz1
:
31
;
/* Should be zero. */
char
elfl_retry
:
1
;
/* Retry flag. */
unsigned
int
elfl_sbz1
:
31
;
/* Should be zero. */
unsigned
int
elfl_retry
:
1
;
/* Retry flag. */
unsigned
int
elfl_procoffset
;
/* Processor-specific offset. */
unsigned
int
elfl_sysoffset
;
/* Offset of system-specific. */
unsigned
int
elfl_error_type
;
/* PAL error type code. */
...
...
@@ -357,13 +357,13 @@ struct el_t2_frame_corrected {
#define vip volatile int *
#define vuip volatile unsigned int *
__EXTERN_INLINE
u8
t2_inb
(
unsigned
long
addr
)
static
inline
u8
t2_inb
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
T2_IO
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
t2_outb
(
u8
b
,
unsigned
long
addr
)
static
inline
void
t2_outb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
...
...
@@ -372,13 +372,13 @@ __EXTERN_INLINE void t2_outb(u8 b, unsigned long addr)
mb
();
}
__EXTERN_INLINE
u16
t2_inw
(
unsigned
long
addr
)
static
inline
u16
t2_inw
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
T2_IO
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
t2_outw
(
u16
b
,
unsigned
long
addr
)
static
inline
void
t2_outw
(
u16
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
...
...
@@ -387,12 +387,12 @@ __EXTERN_INLINE void t2_outw(u16 b, unsigned long addr)
mb
();
}
__EXTERN_INLINE
u32
t2_inl
(
unsigned
long
addr
)
static
inline
u32
t2_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
((
addr
<<
5
)
+
T2_IO
+
0x18
);
}
__EXTERN_INLINE
void
t2_outl
(
u32
b
,
unsigned
long
addr
)
static
inline
void
t2_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
((
addr
<<
5
)
+
T2_IO
+
0x18
)
=
b
;
mb
();
...
...
@@ -438,8 +438,9 @@ __EXTERN_INLINE void t2_outl(u32 b, unsigned long addr)
static
spinlock_t
t2_hae_lock
=
SPIN_LOCK_UNLOCKED
;
__EXTERN_INLINE
u8
t2_readb
(
unsigned
long
addr
)
__EXTERN_INLINE
u8
t2_readb
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
msb
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -451,8 +452,9 @@ __EXTERN_INLINE u8 t2_readb(unsigned long addr)
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u16
t2_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
u16
t2_readw
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
msb
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -468,8 +470,9 @@ __EXTERN_INLINE u16 t2_readw(unsigned long addr)
* On SABLE with T2, we must use SPARSE memory even for 32-bit access,
* because we cannot access all of DENSE without changing its HAE.
*/
__EXTERN_INLINE
u32
t2_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
u32
t2_readl
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
msb
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -481,8 +484,9 @@ __EXTERN_INLINE u32 t2_readl(unsigned long addr)
return
result
&
0xffffffffUL
;
}
__EXTERN_INLINE
u64
t2_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
u64
t2_readq
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
r0
,
r1
,
work
,
msb
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -496,8 +500,9 @@ __EXTERN_INLINE u64 t2_readq(unsigned long addr)
return
r1
<<
32
|
r0
;
}
__EXTERN_INLINE
void
t2_writeb
(
u8
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
t2_writeb
(
u8
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
,
w
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -509,8 +514,9 @@ __EXTERN_INLINE void t2_writeb(u8 b, unsigned long addr)
spin_unlock_irqrestore
(
&
t2_hae_lock
,
flags
);
}
__EXTERN_INLINE
void
t2_writew
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
t2_writew
(
u16
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
,
w
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -526,8 +532,9 @@ __EXTERN_INLINE void t2_writew(u16 b, unsigned long addr)
* On SABLE with T2, we must use SPARSE memory even for 32-bit access,
* because we cannot access all of DENSE without changing its HAE.
*/
__EXTERN_INLINE
void
t2_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
t2_writel
(
u32
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -538,8 +545,9 @@ __EXTERN_INLINE void t2_writel(u32 b, unsigned long addr)
spin_unlock_irqrestore
(
&
t2_hae_lock
,
flags
);
}
__EXTERN_INLINE
void
t2_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
t2_writeq
(
u64
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
,
work
;
unsigned
long
flags
;
spin_lock_irqsave
(
&
t2_hae_lock
,
flags
);
...
...
@@ -552,16 +560,15 @@ __EXTERN_INLINE void t2_writeq(u64 b, unsigned long addr)
spin_unlock_irqrestore
(
&
t2_hae_lock
,
flags
);
}
__EXTERN_INLINE
unsigned
long
t2_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
t2_ioportmap
(
unsigned
long
addr
)
{
return
addr
;
return
(
void
__iomem
*
)(
addr
+
T2_IO
)
;
}
__EXTERN_INLINE
void
t2_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
t2_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
T2_DENSE_MEM
)
;
}
__EXTERN_INLINE
int
t2_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -569,30 +576,47 @@ __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
return
(
long
)
addr
>=
0
;
}
__EXTERN_INLINE
int
t2_is_mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
(
unsigned
long
)
addr
>=
T2_DENSE_MEM
;
}
/* New-style ioread interface. The mmio routines are so ugly for T2 that
it doesn't make sense to merge the pio and mmio routines. */
#define IOPORT(OS, NS) \
__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \
{ \
if (t2_is_mmio(xaddr)) \
return t2_read##OS(xaddr - T2_DENSE_MEM); \
else \
return t2_in##OS((unsigned long)xaddr - T2_IO); \
} \
__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \
{ \
if (t2_is_mmio(xaddr)) \
t2_write##OS(b, xaddr - T2_DENSE_MEM); \
else \
t2_out##OS(b, (unsigned long)xaddr - T2_IO); \
}
IOPORT
(
b
,
8
)
IOPORT
(
w
,
16
)
IOPORT
(
l
,
32
)
#undef IOPORT
#undef vip
#undef vuip
#ifdef __WANT_IO_DEF
#define __inb(p) t2_inb((unsigned long)(p))
#define __inw(p) t2_inw((unsigned long)(p))
#define __inl(p) t2_inl((unsigned long)(p))
#define __outb(x,p) t2_outb((x),(unsigned long)(p))
#define __outw(x,p) t2_outw((x),(unsigned long)(p))
#define __outl(x,p) t2_outl((x),(unsigned long)(p))
#define __readb(a) t2_readb((unsigned long)(a))
#define __readw(a) t2_readw((unsigned long)(a))
#define __readl(a) t2_readl((unsigned long)(a))
#define __readq(a) t2_readq((unsigned long)(a))
#define __writeb(x,a) t2_writeb((x),(unsigned long)(a))
#define __writew(x,a) t2_writew((x),(unsigned long)(a))
#define __writel(x,a) t2_writel((x),(unsigned long)(a))
#define __writeq(x,a) t2_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) t2_ioremap((unsigned long)(a),(s))
#define __iounmap(a) t2_iounmap((unsigned long)(a))
#define __is_ioaddr(a) t2_is_ioaddr((unsigned long)(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX t2
#define t2_trivial_rw_bw 0
#define t2_trivial_rw_lq 0
#define t2_trivial_io_bw 0
#define t2_trivial_io_lq 0
#define t2_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_titan.h
View file @
17647b1d
...
...
@@ -377,149 +377,33 @@ struct el_PRIVATEER_envdata_mcheck {
* can only use linear accesses to get at PCI/AGP memory and I/O spaces.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
titan_inb
(
unsigned
long
addr
)
{
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
correct hose's I/O region, but that doesn't take care of
legacy ISA crap. */
addr
+=
TITAN_IO_BIAS
;
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
titan_outb
(
u8
b
,
unsigned
long
addr
)
{
addr
+=
TITAN_IO_BIAS
;
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
mb
();
}
__EXTERN_INLINE
u16
titan_inw
(
unsigned
long
addr
)
{
addr
+=
TITAN_IO_BIAS
;
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
titan_outw
(
u16
b
,
unsigned
long
addr
)
{
addr
+=
TITAN_IO_BIAS
;
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
mb
();
}
__EXTERN_INLINE
u32
titan_inl
(
unsigned
long
addr
)
{
addr
+=
TITAN_IO_BIAS
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
titan_outl
(
u32
b
,
unsigned
long
addr
)
{
addr
+=
TITAN_IO_BIAS
;
*
(
vuip
)
addr
=
b
;
mb
();
}
/*
* Memory functions. all accesses are done through linear space.
*/
extern
unsigned
long
titan_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
titan_iounmap
(
unsigned
long
addr
);
__EXTERN_INLINE
int
titan_is_ioaddr
(
unsigned
long
addr
)
{
return
addr
>=
TITAN_BASE
;
}
__EXTERN_INLINE
u8
titan_readb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
u16
titan_readw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
u32
titan_readl
(
unsigned
long
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
}
__EXTERN_INLINE
u64
titan_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
titan_ioportmap
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
return
(
void
__iomem
*
)(
addr
+
TITAN_IO_BIAS
)
;
}
__EXTERN_INLINE
void
titan_writeb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
extern
void
__iomem
*
titan_ioremap
(
unsigned
long
addr
,
unsigned
long
size
);
extern
void
titan_iounmap
(
volatile
void
__iomem
*
addr
);
__EXTERN_INLINE
void
titan_writew
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
titan_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
titan_is_ioaddr
(
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
return
addr
>=
TITAN_BASE
;
}
__EXTERN_INLINE
void
titan_writeq
(
u64
b
,
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
}
extern
int
titan_is_mmio
(
const
volatile
void
__iomem
*
addr
);
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) titan_inb((unsigned long)(p))
#define __inw(p) titan_inw((unsigned long)(p))
#define __inl(p) titan_inl((unsigned long)(p))
#define __outb(x,p) titan_outb((x),(unsigned long)(p))
#define __outw(x,p) titan_outw((x),(unsigned long)(p))
#define __outl(x,p) titan_outl((x),(unsigned long)(p))
#define __readb(a) titan_readb((unsigned long)(a))
#define __readw(a) titan_readw((unsigned long)(a))
#define __readl(a) titan_readl((unsigned long)(a))
#define __readq(a) titan_readq((unsigned long)(a))
#define __writeb(x,a) titan_writeb((x),(unsigned long)(a))
#define __writew(x,a) titan_writew((x),(unsigned long)(a))
#define __writel(x,a) titan_writel((x),(unsigned long)(a))
#define __writeq(x,a) titan_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) titan_ioremap((unsigned long)(a),(s))
#define __iounmap(a) titan_iounmap((unsigned long)(a))
#define __is_ioaddr(a) titan_is_ioaddr((unsigned long)(a))
#define inb(port) __inb((port))
#define inw(port) __inw((port))
#define inl(port) __inl((port))
#define outb(v, port) __outb((v),(port))
#define outw(v, port) __outw((v),(port))
#define outl(v, port) __outl((v),(port))
#define __raw_readb(a) __readb((unsigned long)(a))
#define __raw_readw(a) __readw((unsigned long)(a))
#define __raw_readl(a) __readl((unsigned long)(a))
#define __raw_readq(a) __readq((unsigned long)(a))
#define __raw_writeb(v,a) __writeb((v),(unsigned long)(a))
#define __raw_writew(v,a) __writew((v),(unsigned long)(a))
#define __raw_writel(v,a) __writel((v),(unsigned long)(a))
#define __raw_writeq(v,a) __writeq((v),(unsigned long)(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX titan
#define titan_trivial_rw_bw 1
#define titan_trivial_rw_lq 1
#define titan_trivial_io_bw 1
#define titan_trivial_io_lq 1
#define titan_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_tsunami.h
View file @
17647b1d
...
...
@@ -299,69 +299,19 @@ struct el_TSUNAMI_sysdata_mcheck {
* can only use linear accesses to get at PCI memory and I/O spaces.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
tsunami_inb
(
unsigned
long
addr
)
{
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
correct hose's I/O region, but that doesn't take care of
legacy ISA crap. */
addr
+=
TSUNAMI_IO_BIAS
;
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
tsunami_outb
(
u8
b
,
unsigned
long
addr
)
{
addr
+=
TSUNAMI_IO_BIAS
;
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
mb
();
}
__EXTERN_INLINE
u16
tsunami_inw
(
unsigned
long
addr
)
{
addr
+=
TSUNAMI_IO_BIAS
;
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
tsunami_outw
(
u16
b
,
unsigned
long
addr
)
{
addr
+=
TSUNAMI_IO_BIAS
;
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
mb
();
}
__EXTERN_INLINE
u32
tsunami_inl
(
unsigned
long
addr
)
{
addr
+=
TSUNAMI_IO_BIAS
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
tsunami_outl
(
u32
b
,
unsigned
long
addr
)
{
addr
+=
TSUNAMI_IO_BIAS
;
*
(
vuip
)
addr
=
b
;
mb
();
}
/*
* Memory functions. all accesses are done through linear space.
*/
__EXTERN_INLINE
unsigned
long
tsunami_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
tsunami_ioportmap
(
unsigned
long
addr
)
{
return
addr
+
TSUNAMI_MEM_BIAS
;
return
(
void
__iomem
*
)(
addr
+
TSUNAMI_IO_BIAS
)
;
}
__EXTERN_INLINE
void
tsunami_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
tsunami_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
TSUNAMI_MEM_BIAS
)
;
}
__EXTERN_INLINE
int
tsunami_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -369,87 +319,20 @@ __EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr)
return
addr
>=
TSUNAMI_BASE
;
}
__EXTERN_INLINE
u8
tsunami_readb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
u16
tsunami_readw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
u32
tsunami_readl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
u64
tsunami_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
tsunami_writeb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
tsunami_writew
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
tsunami_writel
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
tsunami_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
tsunami_is_mmio
(
const
volatile
void
__iomem
*
xaddr
)
{
*
(
vulp
)
addr
=
b
;
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
return
(
addr
&
0x100000000UL
)
==
0
;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) tsunami_inb((unsigned long)(p))
#define __inw(p) tsunami_inw((unsigned long)(p))
#define __inl(p) tsunami_inl((unsigned long)(p))
#define __outb(x,p) tsunami_outb((x),(unsigned long)(p))
#define __outw(x,p) tsunami_outw((x),(unsigned long)(p))
#define __outl(x,p) tsunami_outl((x),(unsigned long)(p))
#define __readb(a) tsunami_readb((unsigned long)(a))
#define __readw(a) tsunami_readw((unsigned long)(a))
#define __readl(a) tsunami_readl((unsigned long)(a))
#define __readq(a) tsunami_readq((unsigned long)(a))
#define __writeb(x,a) tsunami_writeb((x),(unsigned long)(a))
#define __writew(x,a) tsunami_writew((x),(unsigned long)(a))
#define __writel(x,a) tsunami_writel((x),(unsigned long)(a))
#define __writeq(x,a) tsunami_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) tsunami_ioremap((unsigned long)(a),(s))
#define __iounmap(a) tsunami_iounmap((unsigned long)(a))
#define __is_ioaddr(a) tsunami_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb((x),(p))
#define outw(x,p) __outw((x),(p))
#define outl(x,p) __outl((x),(p))
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb((v),(a))
#define __raw_writew(v,a) __writew((v),(a))
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX tsunami
#define tsunami_trivial_rw_bw 1
#define tsunami_trivial_rw_lq 1
#define tsunami_trivial_io_bw 1
#define tsunami_trivial_io_lq 1
#define tsunami_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_wildfire.h
View file @
17647b1d
...
...
@@ -273,69 +273,19 @@ typedef struct {
#define __IO_EXTERN_INLINE
#endif
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE
u8
wildfire_inb
(
unsigned
long
addr
)
{
/* ??? I wish I could get rid of this. But there's no ioremap
equivalent for I/O space. PCI I/O can be forced into the
correct hose's I/O region, but that doesn't take care of
legacy ISA crap. */
addr
+=
WILDFIRE_IO_BIAS
;
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
wildfire_outb
(
u8
b
,
unsigned
long
addr
)
{
addr
+=
WILDFIRE_IO_BIAS
;
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
mb
();
}
__EXTERN_INLINE
u16
wildfire_inw
(
unsigned
long
addr
)
{
addr
+=
WILDFIRE_IO_BIAS
;
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
wildfire_outw
(
u16
b
,
unsigned
long
addr
)
{
addr
+=
WILDFIRE_IO_BIAS
;
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
mb
();
}
__EXTERN_INLINE
u32
wildfire_inl
(
unsigned
long
addr
)
{
addr
+=
WILDFIRE_IO_BIAS
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
wildfire_outl
(
u32
b
,
unsigned
long
addr
)
{
addr
+=
WILDFIRE_IO_BIAS
;
*
(
vuip
)
addr
=
b
;
mb
();
}
/*
* Memory functions. all accesses are done through linear space.
*/
__EXTERN_INLINE
unsigned
long
wildfire_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
wildfire_ioportmap
(
unsigned
long
addr
)
{
return
addr
+
WILDFIRE_MEM_BIAS
;
return
(
void
__iomem
*
)(
addr
+
WILDFIRE_IO_BIAS
)
;
}
__EXTERN_INLINE
void
wildfire_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
wildfire_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
WILDFIRE_MEM_BIAS
)
;
}
__EXTERN_INLINE
int
wildfire_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -343,87 +293,20 @@ __EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr)
return
addr
>=
WILDFIRE_BASE
;
}
__EXTERN_INLINE
u8
wildfire_readb
(
unsigned
long
addr
)
{
return
__kernel_ldbu
(
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
u16
wildfire_readw
(
unsigned
long
addr
)
{
return
__kernel_ldwu
(
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
u32
wildfire_readl
(
unsigned
long
addr
)
{
return
(
*
(
vuip
)
addr
)
&
0xffffffff
;
}
__EXTERN_INLINE
u64
wildfire_readq
(
unsigned
long
addr
)
{
return
*
(
vulp
)
addr
;
}
__EXTERN_INLINE
void
wildfire_writeb
(
u8
b
,
unsigned
long
addr
)
{
__kernel_stb
(
b
,
*
(
vucp
)
addr
);
}
__EXTERN_INLINE
void
wildfire_writew
(
u16
b
,
unsigned
long
addr
)
{
__kernel_stw
(
b
,
*
(
vusp
)
addr
);
}
__EXTERN_INLINE
void
wildfire_writel
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
wildfire_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
int
wildfire_is_mmio
(
const
volatile
void
__iomem
*
xaddr
)
{
*
(
vulp
)
addr
=
b
;
unsigned
long
addr
=
(
unsigned
long
)
addr
;
return
(
addr
&
0x100000000UL
)
==
0
;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) wildfire_inb((unsigned long)(p))
#define __inw(p) wildfire_inw((unsigned long)(p))
#define __inl(p) wildfire_inl((unsigned long)(p))
#define __outb(x,p) wildfire_outb((x),(unsigned long)(p))
#define __outw(x,p) wildfire_outw((x),(unsigned long)(p))
#define __outl(x,p) wildfire_outl((x),(unsigned long)(p))
#define __readb(a) wildfire_readb((unsigned long)(a))
#define __readw(a) wildfire_readw((unsigned long)(a))
#define __readl(a) wildfire_readl((unsigned long)(a))
#define __readq(a) wildfire_readq((unsigned long)(a))
#define __writeb(x,a) wildfire_writeb((x),(unsigned long)(a))
#define __writew(x,a) wildfire_writew((x),(unsigned long)(a))
#define __writel(x,a) wildfire_writel((x),(unsigned long)(a))
#define __writeq(x,a) wildfire_writeq((x),(unsigned long)(a))
#define __ioremap(a,s) wildfire_ioremap((unsigned long)(a),(s))
#define __iounmap(a) wildfire_iounmap((unsigned long)(a))
#define __is_ioaddr(a) wildfire_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb((x),(p))
#define outw(x,p) __outw((x),(p))
#define outl(x,p) __outl((x),(p))
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb((v),(a))
#define __raw_writew(v,a) __writew((v),(a))
#define __raw_writel(v,a) __writel((v),(a))
#define __raw_writeq(v,a) __writeq((v),(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX wildfire
#define wildfire_trivial_rw_bw 1
#define wildfire_trivial_rw_lq 1
#define wildfire_trivial_io_bw 1
#define wildfire_trivial_io_lq 1
#define wildfire_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/io.h
View file @
17647b1d
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/compiler.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
/* The generic header contains only prototypes. Including it ensures that
the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>
/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
...
...
@@ -14,14 +28,6 @@
#define IDENT_ADDR 0xfffffc0000000000UL
#endif
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
/*
* We try to avoid hae updates (thus the cache), but when we
* do need to update the hae, we need to do it atomically, so
...
...
@@ -88,6 +94,9 @@ static inline void * phys_to_virt(unsigned long address)
/* This depends on working iommu. */
#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff
/*
* Change addresses as seen by the kernel (virtual) to addresses as
* seen by a device (bus), and vice versa.
...
...
@@ -118,67 +127,81 @@ static inline void *bus_to_virt(unsigned long address)
return
(
long
)
address
<=
0
?
NULL
:
virt
;
}
#else
/* !__KERNEL__ */
/*
* Define actual functions in private name-space so it's easier to
* accommodate things like XFree or svgalib that like to define their
* own versions of inb etc.
*/
extern
void
__sethae
(
unsigned
long
addr
);
/* syscall */
extern
void
_sethae
(
unsigned
long
addr
);
/* cached version */
#endif
/* !__KERNEL__ */
/*
* There are different chipsets to interface the Alpha CPUs to the world.
*/
#ifdef __KERNEL__
#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
#ifdef CONFIG_ALPHA_GENERIC
/* In a generic kernel, we always go through the machine vector. */
# define __inb(p) alpha_mv.mv_inb((unsigned long)(p))
# define __inw(p) alpha_mv.mv_inw((unsigned long)(p))
# define __inl(p) alpha_mv.mv_inl((unsigned long)(p))
# define __outb(x,p) alpha_mv.mv_outb((x),(unsigned long)(p))
# define __outw(x,p) alpha_mv.mv_outw((x),(unsigned long)(p))
# define __outl(x,p) alpha_mv.mv_outl((x),(unsigned long)(p))
# define __readb(a) alpha_mv.mv_readb((unsigned long)(a))
# define __readw(a) alpha_mv.mv_readw((unsigned long)(a))
# define __readl(a) alpha_mv.mv_readl((unsigned long)(a))
# define __readq(a) alpha_mv.mv_readq((unsigned long)(a))
# define __writeb(v,a) alpha_mv.mv_writeb((v),(unsigned long)(a))
# define __writew(v,a) alpha_mv.mv_writew((v),(unsigned long)(a))
# define __writel(v,a) alpha_mv.mv_writel((v),(unsigned long)(a))
# define __writeq(v,a) alpha_mv.mv_writeq((v),(unsigned long)(a))
# define __ioremap(a,s) alpha_mv.mv_ioremap((unsigned long)(a),(s))
# define __iounmap(a) alpha_mv.mv_iounmap((unsigned long)(a))
# define __is_ioaddr(a) alpha_mv.mv_is_ioaddr((unsigned long)(a))
# define inb __inb
# define inw __inw
# define inl __inl
# define outb __outb
# define outw __outw
# define outl __outl
# define __raw_readb __readb
# define __raw_readw __readw
# define __raw_readl __readl
# define __raw_readq __readq
# define __raw_writeb __writeb
# define __raw_writew __writew
# define __raw_writel __writel
# define __raw_writeq __writeq
#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
return alpha_mv.mv_##NAME(addr); \
}
#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
alpha_mv.mv_##NAME(b, addr); \
}
#else
REMAP1
(
unsigned
int
,
ioread8
,
/**/
)
REMAP1
(
unsigned
int
,
ioread16
,
/**/
)
REMAP1
(
unsigned
int
,
ioread32
,
/**/
)
REMAP1
(
u8
,
readb
,
const
volatile
)
REMAP1
(
u16
,
readw
,
const
volatile
)
REMAP1
(
u32
,
readl
,
const
volatile
)
REMAP1
(
u64
,
readq
,
const
volatile
)
REMAP2
(
u8
,
iowrite8
,
/**/
)
REMAP2
(
u16
,
iowrite16
,
/**/
)
REMAP2
(
u32
,
iowrite32
,
/**/
)
REMAP2
(
u8
,
writeb
,
volatile
)
REMAP2
(
u16
,
writew
,
volatile
)
REMAP2
(
u32
,
writel
,
volatile
)
REMAP2
(
u64
,
writeq
,
volatile
)
#undef REMAP1
#undef REMAP2
static
inline
void
__iomem
*
generic_ioportmap
(
unsigned
long
a
)
{
return
alpha_mv
.
mv_ioportmap
(
a
);
}
static
inline
void
__iomem
*
generic_ioremap
(
unsigned
long
a
,
unsigned
long
s
)
{
return
alpha_mv
.
mv_ioremap
(
a
,
s
);
}
static
inline
void
generic_iounmap
(
volatile
void
__iomem
*
a
)
{
return
alpha_mv
.
mv_iounmap
(
a
);
}
static
inline
int
generic_is_ioaddr
(
unsigned
long
a
)
{
return
alpha_mv
.
mv_is_ioaddr
(
a
);
}
/* Control how and what gets defined within the core logic headers. */
#define __WANT_IO_DEF
static
inline
int
generic_is_mmio
(
const
volatile
void
__iomem
*
a
)
{
return
alpha_mv
.
mv_is_mmio
(
a
);
}
#define __IO_PREFIX generic
#define generic_trivial_rw_bw 0
#define generic_trivial_rw_lq 0
#define generic_trivial_io_bw 0
#define generic_trivial_io_lq 0
#define generic_trivial_iounmap 0
#else
#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
...
...
@@ -208,245 +231,280 @@ extern void _sethae (unsigned long addr); /* cached version */
#error "What system is this?"
#endif
#undef __WANT_IO_DEF
#endif
/* GENERIC */
#endif
/* __KERNEL__ */
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
* single underscore are proper functions, and names starting with a
* letter are macros that map in some way to inline or proper function
* versions. Not all that pretty, but before you change it, be sure
* to convince yourself that it won't break anything (in particular
* module support).
* We always have external versions of these routines.
*/
extern
u8
_inb
(
unsigned
long
port
);
extern
u16
_inw
(
unsigned
long
port
);
extern
u32
_inl
(
unsigned
long
port
);
extern
void
_outb
(
u8
b
,
unsigned
long
port
);
extern
void
_outw
(
u16
w
,
unsigned
long
port
);
extern
void
_outl
(
u32
l
,
unsigned
long
port
);
extern
u8
_readb
(
unsigned
long
addr
);
extern
u16
_readw
(
unsigned
long
addr
);
extern
u32
_readl
(
unsigned
long
addr
);
extern
u64
_readq
(
unsigned
long
addr
);
extern
void
_writeb
(
u8
b
,
unsigned
long
addr
);
extern
void
_writew
(
u16
b
,
unsigned
long
addr
);
extern
void
_writel
(
u32
b
,
unsigned
long
addr
);
extern
void
_writeq
(
u64
b
,
unsigned
long
addr
);
extern
u8
inb
(
unsigned
long
port
);
extern
u16
inw
(
unsigned
long
port
);
extern
u32
inl
(
unsigned
long
port
);
extern
void
outb
(
u8
b
,
unsigned
long
port
);
extern
void
outw
(
u16
b
,
unsigned
long
port
);
extern
void
outl
(
u32
b
,
unsigned
long
port
);
extern
u8
readb
(
const
volatile
void
__iomem
*
addr
);
extern
u16
readw
(
const
volatile
void
__iomem
*
addr
);
extern
u32
readl
(
const
volatile
void
__iomem
*
addr
);
extern
u64
readq
(
const
volatile
void
__iomem
*
addr
);
extern
void
writeb
(
u8
b
,
volatile
void
__iomem
*
addr
);
extern
void
writew
(
u16
b
,
volatile
void
__iomem
*
addr
);
extern
void
writel
(
u32
b
,
volatile
void
__iomem
*
addr
);
extern
void
writeq
(
u64
b
,
volatile
void
__iomem
*
addr
);
extern
u8
__raw_readb
(
const
volatile
void
__iomem
*
addr
);
extern
u16
__raw_readw
(
const
volatile
void
__iomem
*
addr
);
extern
u32
__raw_readl
(
const
volatile
void
__iomem
*
addr
);
extern
u64
__raw_readq
(
const
volatile
void
__iomem
*
addr
);
extern
void
__raw_writeb
(
u8
b
,
volatile
void
__iomem
*
addr
);
extern
void
__raw_writew
(
u16
b
,
volatile
void
__iomem
*
addr
);
extern
void
__raw_writel
(
u32
b
,
volatile
void
__iomem
*
addr
);
extern
void
__raw_writeq
(
u64
b
,
volatile
void
__iomem
*
addr
);
#ifdef __KERNEL__
/*
* The platform header files may define some of these macros to use
* the inlined versions where appropriate. These macros may also be
* redefined by userlevel programs.
* Mapping from port numbers to __iomem space is pretty easy.
*/
#ifndef inb
# define inb(p) _inb(p)
#endif
#ifndef inw
# define inw(p) _inw(p)
#endif
#ifndef inl
# define inl(p) _inl(p)
#endif
#ifndef outb
# define outb(b,p) _outb((b),(p))
#endif
#ifndef outw
# define outw(w,p) _outw((w),(p))
#endif
#ifndef outl
# define outl(l,p) _outl((l),(p))
#endif
#ifndef inb_p
# define inb_p inb
#endif
#ifndef inw_p
# define inw_p inw
#endif
#ifndef inl_p
# define inl_p inl
#endif
/* These two have to be extern inline so that we don't get redefinition
errors building lib/iomap.c. Which we don't want anyway, but... */
extern
inline
void
__iomem
*
ioport_map
(
unsigned
long
port
,
unsigned
int
size
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
ioportmap
)
(
port
);
}
#ifndef outb_p
# define outb_p outb
#endif
#ifndef outw_p
# define outw_p outw
#endif
#ifndef outl_p
# define outl_p outl
#endif
extern
inline
void
ioport_unmap
(
void
__iomem
*
addr
)
{
}
#define IO_SPACE_LIMIT 0xffff
static
inline
void
__iomem
*
ioremap
(
unsigned
long
port
,
unsigned
long
size
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
ioremap
)
(
port
,
size
);
}
#else
static
inline
void
__iomem
*
ioremap_nocache
(
unsigned
long
offset
,
unsigned
long
size
)
{
return
ioremap
(
offset
,
size
);
}
/* Userspace declarations. Kill in 2.5. */
static
inline
void
iounmap
(
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
iounmap
)(
addr
);
}
extern
unsigned
int
inb
(
unsigned
long
port
);
extern
unsigned
int
inw
(
unsigned
long
port
);
extern
unsigned
int
inl
(
unsigned
long
port
);
extern
void
outb
(
unsigned
char
b
,
unsigned
long
port
);
extern
void
outw
(
unsigned
short
w
,
unsigned
long
port
);
extern
void
outl
(
unsigned
int
l
,
unsigned
long
port
);
extern
unsigned
long
readb
(
unsigned
long
addr
);
extern
unsigned
long
readw
(
unsigned
long
addr
);
extern
unsigned
long
readl
(
unsigned
long
addr
);
extern
void
writeb
(
unsigned
char
b
,
unsigned
long
addr
);
extern
void
writew
(
unsigned
short
b
,
unsigned
long
addr
);
extern
void
writel
(
unsigned
int
b
,
unsigned
long
addr
);
static
inline
int
__is_ioaddr
(
unsigned
long
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
is_ioaddr
)(
addr
);
}
#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))
#endif
/* __KERNEL__ */
static
inline
int
__is_mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
is_mmio
)(
addr
);
}
#ifdef __KERNEL__
/*
* On Alpha, we have the whole of I/O space mapped at all times, but
* at odd and sometimes discontinuous addresses. Note that the
* discontinuities are all across busses, so we need not care for that
* for any one device.
*
* The DRM drivers need to be able to map contiguously a (potentially)
* discontiguous set of I/O pages. This set of pages is scatter-gather
* mapped contiguously from the perspective of the bus, but we can't
* directly access DMA addresses from the CPU, these addresses need to
* have a real ioremap. Therefore, iounmap and the size argument to
* ioremap are needed to give the platforms the ability to fully implement
* ioremap.
*
* Map the I/O space address into the kernel's virtual address space.
* If the actual I/O bits are sufficiently trivial, then expand inline.
*/
static
inline
void
*
ioremap
(
unsigned
long
offset
,
unsigned
long
size
)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern
inline
unsigned
int
ioread8
(
void
__iomem
*
addr
)
{
return
(
void
*
)
__ioremap
(
offset
,
size
);
}
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread8
)(
addr
);
mb
();
return
ret
;
}
static
inline
void
iounmap
(
void
*
addr
)
extern
inline
unsigned
int
ioread16
(
void
__iomem
*
addr
)
{
__iounmap
(
addr
);
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread16
)(
addr
);
mb
();
return
ret
;
}
static
inline
void
*
ioremap_nocache
(
unsigned
long
offset
,
unsigned
long
size
)
extern
inline
void
iowrite8
(
u8
b
,
void
__iomem
*
addr
)
{
return
ioremap
(
offset
,
size
);
}
IO_CONCAT
(
__IO_PREFIX
,
iowrite8
)(
b
,
addr
);
mb
();
}
/* Indirect back to the macros provided. */
extern
inline
void
iowrite16
(
u16
b
,
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
iowrite16
)(
b
,
addr
);
mb
();
}
extern
u8
___raw_readb
(
unsigned
long
addr
);
extern
u16
___raw_readw
(
unsigned
long
addr
);
extern
u32
___raw_readl
(
unsigned
long
addr
);
extern
u64
___raw_readq
(
unsigned
long
addr
);
extern
void
___raw_writeb
(
u8
b
,
unsigned
long
addr
);
extern
void
___raw_writew
(
u16
b
,
unsigned
long
addr
);
extern
void
___raw_writel
(
u32
b
,
unsigned
long
addr
);
extern
void
___raw_writeq
(
u64
b
,
unsigned
long
addr
);
extern
inline
u8
inb
(
unsigned
long
port
)
{
return
ioread8
(
ioport_map
(
port
,
1
));
}
#ifdef __raw_readb
# define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; })
#endif
#ifdef __raw_readq
# define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; })
#endif
extern
inline
u16
inw
(
unsigned
long
port
)
{
return
ioread16
(
ioport_map
(
port
,
2
));
}
#ifdef __raw_writeb
# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a) ({ __raw_writew((v),(a)); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a) ({ __raw_writel((v),(a)); mb(); })
#endif
#ifdef __raw_writeq
# define writeq(v,a) ({ __raw_writeq((v),(a)); mb(); })
#endif
extern
inline
void
outb
(
u8
b
,
unsigned
long
port
)
{
iowrite8
(
b
,
ioport_map
(
port
,
1
));
}
#ifndef __raw_readb
# define __raw_readb(a) ___raw_readb((unsigned long)(a))
#endif
#ifndef __raw_readw
# define __raw_readw(a) ___raw_readw((unsigned long)(a))
#endif
#ifndef __raw_readl
# define __raw_readl(a) ___raw_readl((unsigned long)(a))
#endif
#ifndef __raw_readq
# define __raw_readq(a) ___raw_readq((unsigned long)(a))
extern
inline
void
outw
(
u16
b
,
unsigned
long
port
)
{
iowrite16
(
b
,
ioport_map
(
port
,
2
));
}
#endif
#ifndef __raw_writeb
# define __raw_writeb(v,a) ___raw_writeb((v),(unsigned long)(a))
#endif
#ifndef __raw_writew
# define __raw_writew(v,a) ___raw_writew((v),(unsigned long)(a))
#endif
#ifndef __raw_writel
# define __raw_writel(v,a) ___raw_writel((v),(unsigned long)(a))
#endif
#ifndef __raw_writeq
# define __raw_writeq(v,a) ___raw_writeq((v),(unsigned long)(a))
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern
inline
unsigned
int
ioread32
(
void
__iomem
*
addr
)
{
unsigned
int
ret
=
IO_CONCAT
(
__IO_PREFIX
,
ioread32
)(
addr
);
mb
();
return
ret
;
}
#ifndef readb
# define readb(a) _readb((unsigned long)(a))
#endif
#ifndef readw
# define readw(a) _readw((unsigned long)(a))
#endif
#ifndef readl
# define readl(a) _readl((unsigned long)(a))
#endif
#ifndef readq
# define readq(a) _readq((unsigned long)(a))
#endif
extern
inline
void
iowrite32
(
u32
b
,
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
iowrite32
)(
b
,
addr
);
mb
();
}
#define readb_relaxed(addr) readb(addr
)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)
extern
inline
u32
inl
(
unsigned
long
port
)
{
return
ioread32
(
ioport_map
(
port
,
4
));
}
#ifndef writeb
# define writeb(v,a) _writeb((v),(unsigned long)(a))
#endif
#ifndef writew
# define writew(v,a) _writew((v),(unsigned long)(a))
extern
inline
void
outl
(
u32
b
,
unsigned
long
port
)
{
iowrite32
(
b
,
ioport_map
(
port
,
4
));
}
#endif
#ifndef writel
# define writel(v,a) _writel((v),(unsigned long)(a))
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern
inline
u8
__raw_readb
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
readb
)(
addr
);
}
extern
inline
u16
__raw_readw
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
readw
)(
addr
);
}
extern
inline
void
__raw_writeb
(
u8
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writeb
)(
b
,
addr
);
}
extern
inline
void
__raw_writew
(
u16
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writew
)(
b
,
addr
);
}
extern
inline
u8
readb
(
const
volatile
void
__iomem
*
addr
)
{
u8
ret
=
__raw_readb
(
addr
);
mb
();
return
ret
;
}
extern
inline
u16
readw
(
const
volatile
void
__iomem
*
addr
)
{
u16
ret
=
__raw_readw
(
addr
);
mb
();
return
ret
;
}
extern
inline
void
writeb
(
u8
b
,
volatile
void
__iomem
*
addr
)
{
__raw_writeb
(
b
,
addr
);
mb
();
}
extern
inline
void
writew
(
u16
b
,
volatile
void
__iomem
*
addr
)
{
__raw_writew
(
b
,
addr
);
mb
();
}
#endif
#ifndef writeq
# define writeq(v,a) _writeq((v),(unsigned long)(a))
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern
inline
u32
__raw_readl
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
readl
)(
addr
);
}
extern
inline
u64
__raw_readq
(
const
volatile
void
__iomem
*
addr
)
{
return
IO_CONCAT
(
__IO_PREFIX
,
readq
)(
addr
);
}
extern
inline
void
__raw_writel
(
u32
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writel
)(
b
,
addr
);
}
extern
inline
void
__raw_writeq
(
u64
b
,
volatile
void
__iomem
*
addr
)
{
IO_CONCAT
(
__IO_PREFIX
,
writeq
)(
b
,
addr
);
}
extern
inline
u32
readl
(
const
volatile
void
__iomem
*
addr
)
{
u32
ret
=
__raw_readl
(
addr
);
mb
();
return
ret
;
}
extern
inline
u64
readq
(
const
volatile
void
__iomem
*
addr
)
{
u64
ret
=
__raw_readq
(
addr
);
mb
();
return
ret
;
}
extern
inline
void
writel
(
u32
b
,
volatile
void
__iomem
*
addr
)
{
__raw_writel
(
b
,
addr
);
mb
();
}
extern
inline
void
writeq
(
u64
b
,
volatile
void
__iomem
*
addr
)
{
__raw_writeq
(
b
,
addr
);
mb
();
}
#endif
#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)
/*
* String version of IO memory access ops:
*/
extern
void
_memcpy_fromio
(
void
*
,
unsigned
long
,
long
);
extern
void
_memcpy_toio
(
unsigned
long
,
const
void
*
,
long
);
extern
void
_memset_c_io
(
unsigned
long
,
unsigned
long
,
long
);
extern
void
memcpy_fromio
(
void
*
,
const
volatile
void
__iomem
*
,
long
);
extern
void
memcpy_toio
(
volatile
void
__iomem
*
,
const
void
*
,
long
);
extern
void
_memset_c_io
(
volatile
void
__iomem
*
,
unsigned
long
,
long
);
#define memcpy_fromio(to,from,len) \
_memcpy_fromio((to),(unsigned long)(from),(len))
#define memcpy_toio(to,from,len) \
_memcpy_toio((unsigned long)(to),(from),(len))
#define memset_io(addr,c,len) \
_memset_c_io((unsigned long)(addr),0x0101010101010101UL*(u8)(c),(len))
static
inline
void
memset_io
(
volatile
void
__iomem
*
addr
,
u8
c
,
long
len
)
{
_memset_c_io
(
addr
,
0x0101010101010101UL
*
c
,
len
);
}
#define __HAVE_ARCH_MEMSETW_IO
#define memsetw_io(addr,c,len) \
_memset_c_io((unsigned long)(addr),0x0001000100010001UL*(u16)(c),(len))
static
inline
void
memsetw_io
(
volatile
void
__iomem
*
addr
,
u16
c
,
long
len
)
{
_memset_c_io
(
addr
,
0x0001000100010001UL
*
c
,
len
);
}
/*
* String versions of in/out ops:
...
...
@@ -465,26 +523,22 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
*/
#define eth_io_copy_and_sum(skb,src,len,unused) \
memcpy_fromio((skb)->data,
(src),(len)
)
memcpy_fromio((skb)->data,
src,len
)
#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
isa_memcpy_fromio((skb)->data,
(src),(len)
)
isa_memcpy_fromio((skb)->data,
src,len
)
static
inline
int
check_signature
(
unsigned
long
io_addr
,
const
unsigned
char
*
signature
,
int
length
)
check_signature
(
const
volatile
void
__iomem
*
io_addr
,
const
unsigned
char
*
signature
,
int
length
)
{
int
retval
=
0
;
do
{
if
(
readb
(
io_addr
)
!=
*
signature
)
goto
out
;
return
0
;
io_addr
++
;
signature
++
;
length
--
;
}
while
(
length
);
retval
=
1
;
out:
return
retval
;
}
while
(
--
length
);
return
1
;
}
...
...
@@ -492,31 +546,89 @@ check_signature(unsigned long io_addr, const unsigned char *signature,
* ISA space is mapped to some machine-specific location on Alpha.
* Call into the existing hooks to get the address translated.
*/
#define isa_readb(a) readb(__ioremap((a),1))
#define isa_readw(a) readw(__ioremap((a),2))
#define isa_readl(a) readl(__ioremap((a),4))
#define isa_writeb(b,a) writeb((b),__ioremap((a),1))
#define isa_writew(w,a) writew((w),__ioremap((a),2))
#define isa_writel(l,a) writel((l),__ioremap((a),4))
#define isa_memset_io(a,b,c) memset_io(__ioremap((a),(c)),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ioremap((b),(c)),(c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(__ioremap((a),(c)),(b),(c))
static
inline
u8
isa_readb
(
unsigned
long
offset
)
{
void
__iomem
*
addr
=
ioremap
(
offset
,
1
);
u8
ret
=
readb
(
addr
);
iounmap
(
addr
);
return
ret
;
}
static
inline
u16
isa_readw
(
unsigned
long
offset
)
{
void
__iomem
*
addr
=
ioremap
(
offset
,
2
);
u16
ret
=
readw
(
addr
);
iounmap
(
addr
);
return
ret
;
}
/*
 * Read one 32-bit longword from ISA memory space via a temporary
 * mapping.
 *
 * Bug fix: the mapping previously covered only 2 bytes, but readl()
 * accesses 4; map the full width of the access.
 */
static inline u32
isa_readl(unsigned long offset)
{
	void __iomem *addr = ioremap(offset, 4);
	u32 ret = readl(addr);
	iounmap(addr);
	return ret;
}
/*
 * Write one byte to ISA memory space via a temporary mapping.
 *
 * Fix: map exactly the 1 byte that writeb() touches (was 2).
 */
static inline void
isa_writeb(u8 b, unsigned long offset)
{
	void __iomem *addr = ioremap(offset, 1);
	writeb(b, addr);
	iounmap(addr);
}
/* Write one 16-bit word to ISA memory space via a temporary mapping. */
static inline void
isa_writew(u16 w, unsigned long offset)
{
	void __iomem *map = ioremap(offset, 2);

	writew(w, map);
	iounmap(map);
}
/*
 * Write one 32-bit longword to ISA memory space via a temporary
 * mapping.
 *
 * Bug fix: the mapping previously covered only 2 bytes, but writel()
 * accesses 4; map the full width of the access.
 */
static inline void
isa_writel(u32 l, unsigned long offset)
{
	void __iomem *addr = ioremap(offset, 4);
	writel(l, addr);
	iounmap(addr);
}
/* Fill 'n' bytes of ISA memory space with 'val' through a temporary
   mapping of the whole region. */
static inline void
isa_memset_io(unsigned long offset, u8 val, long n)
{
	void __iomem *map = ioremap(offset, n);

	memset_io(map, val, n);
	iounmap(map);
}
static
inline
void
isa_memcpy_fromio
(
void
*
dest
,
unsigned
long
offset
,
long
n
)
{
void
__iomem
*
addr
=
ioremap
(
offset
,
n
);
memcpy_fromio
(
dest
,
addr
,
n
);
iounmap
(
addr
);
}
static
inline
void
isa_memcpy_toio
(
unsigned
long
offset
,
const
void
*
src
,
long
n
)
{
void
__iomem
*
addr
=
ioremap
(
offset
,
n
);
memcpy_toio
(
addr
,
src
,
n
);
iounmap
(
addr
);
}
static
inline
int
isa_check_signature
(
unsigned
long
io_addr
,
const
unsigned
char
*
signature
,
int
length
)
isa_check_signature
(
unsigned
long
offset
,
const
unsigned
char
*
sig
,
long
len
)
{
int
retval
=
0
;
do
{
if
(
isa_readb
(
io_addr
)
!=
*
signature
)
goto
out
;
io_addr
++
;
signature
++
;
length
--
;
}
while
(
length
);
retval
=
1
;
out:
return
retval
;
void
__iomem
*
addr
=
ioremap
(
offset
,
len
);
int
ret
=
check_signature
(
addr
,
sig
,
len
);
iounmap
(
addr
);
return
ret
;
}
...
...
include/asm-alpha/io_trivial.h
0 → 100644
View file @
17647b1d
/* Trivial implementations of basic i/o routines. Assumes that all
of the hard work has been done by ioremap and ioportmap, and that
access to i/o space is linear. */
/* This file may be included multiple times. */
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/* Read one byte from i/o space.  Access is linear here, so this is a
   plain byte load through the cookie. */
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
{
	return __kernel_ldbu(*(volatile u8 __force *)a);
}
/* Read one 16-bit word from i/o space (linear access). */
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
{
	return __kernel_ldwu(*(volatile u16 __force *)a);
}
/* Write one byte to i/o space (linear access). */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}
/*
 * Write one 16-bit word to i/o space (linear access).
 *
 * Bug fix: this used __kernel_stb, which stores only the low byte;
 * a 16-bit store must use the stw form.
 */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/* Read one 32-bit longword from i/o space (linear access). */
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
{
	return *(volatile u32 __force *)a;
}
/* Write one 32-bit longword to i/o space (linear access). */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
/* MMIO byte read: direct byte load (platform has byte/word access). */
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return __kernel_ldbu(*(const volatile u8 __force *)a);
}
/* MMIO 16-bit read: direct word load. */
__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return __kernel_ldwu(*(const volatile u16 __force *)a);
}
/* MMIO byte write: direct byte store. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}
/*
 * MMIO 16-bit write.
 *
 * Bug fix: this used __kernel_stb, which stores only the low byte;
 * a 16-bit store must use the stw form.
 */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2
/* MMIO byte read: forward to the ioread8 routine (same linear space). */
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread8)((void __iomem *)a);
}
/* MMIO 16-bit read: forward to the ioread16 routine. */
__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread16)((void __iomem *)a);
}
/* MMIO byte write: forward to the iowrite8 routine. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, (void __iomem *)a);
}
/* MMIO 16-bit write: forward to the iowrite16 routine. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, (void __iomem *)a);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
/* MMIO 32-bit read: direct longword load. */
__EXTERN_INLINE u32
IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a)
{
	return *(const volatile u32 __force *)a;
}
/* MMIO 64-bit read: direct quadword load. */
__EXTERN_INLINE u64
IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a)
{
	return *(const volatile u64 __force *)a;
}
/* MMIO 32-bit write: direct longword store. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}
/* MMIO 64-bit write: direct quadword store. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a)
{
	*(volatile u64 __force *)a = b;
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_iounmap)
/* Trivial iounmap: the mapping is static on this platform, so there
   is nothing to tear down. */
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a)
{
}
#endif
include/asm-alpha/jensen.h
View file @
17647b1d
...
...
@@ -200,8 +200,9 @@ __EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
* Memory functions.
*/
__EXTERN_INLINE
u8
jensen_readb
(
unsigned
long
addr
)
__EXTERN_INLINE
u8
jensen_readb
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
long
result
;
jensen_set_hae
(
addr
);
...
...
@@ -211,8 +212,9 @@ __EXTERN_INLINE u8 jensen_readb(unsigned long addr)
return
0xffUL
&
result
;
}
__EXTERN_INLINE
u16
jensen_readw
(
unsigned
long
addr
)
__EXTERN_INLINE
u16
jensen_readw
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
long
result
;
jensen_set_hae
(
addr
);
...
...
@@ -222,15 +224,17 @@ __EXTERN_INLINE u16 jensen_readw(unsigned long addr)
return
0xffffUL
&
result
;
}
__EXTERN_INLINE
u32
jensen_readl
(
unsigned
long
addr
)
__EXTERN_INLINE
u32
jensen_readl
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
jensen_set_hae
(
addr
);
addr
&=
JENSEN_HAE_MASK
;
return
*
(
vuip
)
((
addr
<<
7
)
+
EISA_MEM
+
0x60
);
}
__EXTERN_INLINE
u64
jensen_readq
(
unsigned
long
addr
)
__EXTERN_INLINE
u64
jensen_readq
(
const
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
r0
,
r1
;
jensen_set_hae
(
addr
);
...
...
@@ -241,29 +245,33 @@ __EXTERN_INLINE u64 jensen_readq(unsigned long addr)
return
r1
<<
32
|
r0
;
}
__EXTERN_INLINE
void
jensen_writeb
(
u8
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
jensen_writeb
(
u8
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
jensen_set_hae
(
addr
);
addr
&=
JENSEN_HAE_MASK
;
*
(
vuip
)
((
addr
<<
7
)
+
EISA_MEM
+
0x00
)
=
b
*
0x01010101
;
}
__EXTERN_INLINE
void
jensen_writew
(
u16
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
jensen_writew
(
u16
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
jensen_set_hae
(
addr
);
addr
&=
JENSEN_HAE_MASK
;
*
(
vuip
)
((
addr
<<
7
)
+
EISA_MEM
+
0x20
)
=
b
*
0x00010001
;
}
__EXTERN_INLINE
void
jensen_writel
(
u32
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
jensen_writel
(
u32
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
jensen_set_hae
(
addr
);
addr
&=
JENSEN_HAE_MASK
;
*
(
vuip
)
((
addr
<<
7
)
+
EISA_MEM
+
0x60
)
=
b
;
}
__EXTERN_INLINE
void
jensen_writeq
(
u64
b
,
unsigned
long
addr
)
__EXTERN_INLINE
void
jensen_writeq
(
u64
b
,
volatile
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
jensen_set_hae
(
addr
);
addr
&=
JENSEN_HAE_MASK
;
addr
=
(
addr
<<
7
)
+
EISA_MEM
+
0x60
;
...
...
@@ -271,15 +279,15 @@ __EXTERN_INLINE void jensen_writeq(u64 b, unsigned long addr)
*
(
vuip
)
(
addr
+
(
4
<<
7
))
=
b
>>
32
;
}
__EXTERN_INLINE
unsigned
long
jensen_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
__EXTERN_INLINE
void
__iomem
*
jensen_ioportmap
(
unsigned
long
addr
)
{
return
addr
;
return
(
void
__iomem
*
)
addr
;
}
__EXTERN_INLINE
void
jensen_iounmap
(
unsigned
long
addr
)
__EXTERN_INLINE
void
__iomem
*
jensen_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
0x100000000ul
)
;
}
__EXTERN_INLINE
int
jensen_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -287,39 +295,46 @@ __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
return
(
long
)
addr
>=
0
;
}
#undef vuip
/* True if 'addr' is an MMIO cookie rather than a port number --
   jensen_ioremap() tags MMIO cookies by offsetting them above 4GB. */
__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= 0x100000000ul;
}
#ifdef __WANT_IO_DEF
#define __inb jensen_inb
#define __inw jensen_inw
#define __inl jensen_inl
#define __outb jensen_outb
#define __outw jensen_outw
#define __outl jensen_outl
#define __readb jensen_readb
#define __readw jensen_readw
#define __writeb jensen_writeb
#define __writew jensen_writew
#define __readl jensen_readl
#define __readq jensen_readq
#define __writel jensen_writel
#define __writeq jensen_writeq
#define __ioremap jensen_ioremap
#define __iounmap(a) jensen_iounmap((unsigned long)a)
#define __is_ioaddr jensen_is_ioaddr
/* New-style ioread interface.  All the routines are so ugly for Jensen
   that it doesn't make sense to merge them.  Each generated pair
   dispatches on the cookie kind: MMIO cookies go through the memory
   accessors (after stripping the 4GB tag added by jensen_ioremap),
   port numbers through the in/out routines. */
#define IOPORT(OS, NS) \
__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \
{ \
	if (jensen_is_mmio(xaddr)) \
		return jensen_read##OS(xaddr - 0x100000000ul); \
	else \
		return jensen_in##OS((unsigned long)xaddr); \
} \
__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \
{ \
	if (jensen_is_mmio(xaddr)) \
		jensen_write##OS(b, xaddr - 0x100000000ul); \
	else \
		jensen_out##OS(b, (unsigned long)xaddr); \
}
/*
* The above have so much overhead that it probably doesn't make
* sense to have them inlined (better icache behaviour).
*/
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
IOPORT
(
b
,
8
)
IOPORT
(
w
,
16
)
IOPORT
(
l
,
32
)
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
#undef IOPORT
#undef vuip
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX jensen
#define jensen_trivial_rw_bw 0
#define jensen_trivial_rw_lq 0
#define jensen_trivial_io_bw 0
#define jensen_trivial_io_lq 0
#define jensen_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/machvec.h
View file @
17647b1d
...
...
@@ -45,27 +45,29 @@ struct alpha_machine_vector
void
(
*
mv_pci_tbi
)(
struct
pci_controller
*
hose
,
dma_addr_t
start
,
dma_addr_t
end
);
u8
(
*
mv_inb
)(
unsigned
long
);
u16
(
*
mv_inw
)(
unsigned
long
);
u32
(
*
mv_inl
)(
unsigned
long
);
void
(
*
mv_outb
)(
u8
,
unsigned
long
);
void
(
*
mv_outw
)(
u16
,
unsigned
long
);
void
(
*
mv_outl
)(
u32
,
unsigned
long
);
u8
(
*
mv_readb
)(
unsigned
long
);
u16
(
*
mv_readw
)(
unsigned
long
);
u32
(
*
mv_readl
)(
unsigned
long
);
u64
(
*
mv_readq
)(
unsigned
long
);
void
(
*
mv_writeb
)(
u8
,
unsigned
long
);
void
(
*
mv_writew
)(
u16
,
unsigned
long
);
void
(
*
mv_writel
)(
u32
,
unsigned
long
);
void
(
*
mv_writeq
)(
u64
,
unsigned
long
);
unsigned
long
(
*
mv_ioremap
)(
unsigned
long
,
unsigned
long
);
void
(
*
mv_iounmap
)(
unsigned
long
);
unsigned
int
(
*
mv_ioread8
)(
void
__iomem
*
);
unsigned
int
(
*
mv_ioread16
)(
void
__iomem
*
);
unsigned
int
(
*
mv_ioread32
)(
void
__iomem
*
);
void
(
*
mv_iowrite8
)(
u8
,
void
__iomem
*
);
void
(
*
mv_iowrite16
)(
u16
,
void
__iomem
*
);
void
(
*
mv_iowrite32
)(
u32
,
void
__iomem
*
);
u8
(
*
mv_readb
)(
const
volatile
void
__iomem
*
);
u16
(
*
mv_readw
)(
const
volatile
void
__iomem
*
);
u32
(
*
mv_readl
)(
const
volatile
void
__iomem
*
);
u64
(
*
mv_readq
)(
const
volatile
void
__iomem
*
);
void
(
*
mv_writeb
)(
u8
,
volatile
void
__iomem
*
);
void
(
*
mv_writew
)(
u16
,
volatile
void
__iomem
*
);
void
(
*
mv_writel
)(
u32
,
volatile
void
__iomem
*
);
void
(
*
mv_writeq
)(
u64
,
volatile
void
__iomem
*
);
void
__iomem
*
(
*
mv_ioportmap
)(
unsigned
long
);
void
__iomem
*
(
*
mv_ioremap
)(
unsigned
long
,
unsigned
long
);
void
(
*
mv_iounmap
)(
volatile
void
__iomem
*
);
int
(
*
mv_is_ioaddr
)(
unsigned
long
);
int
(
*
mv_is_mmio
)(
const
volatile
void
__iomem
*
);
void
(
*
mv_switch_mm
)(
struct
mm_struct
*
,
struct
mm_struct
*
,
struct
task_struct
*
);
...
...
include/asm-alpha/mmu_context.h
View file @
17647b1d
...
...
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <asm/system.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
/*
* Force a context reload. This is needed when we change the page
...
...
include/asm-alpha/spinlock.h
View file @
17647b1d
...
...
@@ -95,7 +95,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
/***********************************************************/
/*
 * Read-write spin lock word: a write-lock flag plus a 31-bit count of
 * active readers, packed into a single word.
 *
 * Fix: the bitfields are unsigned -- a signed 1-bit bitfield can only
 * represent 0 and -1, which breaks comparisons against 1.
 */
typedef struct {
	volatile unsigned int write_lock:1, read_counter:31;
} /*__attribute__((aligned(32)))*/ rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
...
...
@@ -124,7 +124,7 @@ static inline void _raw_write_lock(rwlock_t * lock)
" br 1b
\n
"
".previous"
:
"=m"
(
*
lock
),
"=&r"
(
regx
)
:
"
0
"
(
*
lock
)
:
"memory"
);
:
"
m
"
(
*
lock
)
:
"memory"
);
}
static
inline
void
_raw_read_lock
(
rwlock_t
*
lock
)
...
...
include/asm-alpha/system.h
View file @
17647b1d
...
...
@@ -55,9 +55,9 @@
*/
struct
el_common
{
unsigned
int
size
;
/* size in bytes of logout area */
int
sbz1
:
30
;
/* should be zero */
int
err2
:
1
;
/* second error */
int
retry
:
1
;
/* retry flag */
unsigned
int
sbz1
:
30
;
/* should be zero */
unsigned
int
err2
:
1
;
/* second error */
unsigned
int
retry
:
1
;
/* retry flag */
unsigned
int
proc_offset
;
/* processor-specific offset */
unsigned
int
sys_offset
;
/* system-specific offset */
unsigned
int
code
;
/* machine check code */
...
...
include/asm-alpha/tlbflush.h
View file @
17647b1d
...
...
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
...
...
include/asm-alpha/vga.h
View file @
17647b1d
...
...
@@ -15,24 +15,24 @@
/* Store a screen word: MMIO-backed consoles go through the raw MMIO
   write, memory-backed ones through a plain store. */
extern inline void scr_writew(u16 val, volatile u16 *addr)
{
	if (__is_ioaddr(addr))
		__raw_writew(val, (volatile u16 __iomem *) addr);
	else
		*addr = val;
}
/* Load a screen word: MMIO-backed consoles go through the raw MMIO
   read, memory-backed ones through a plain load. */
extern inline u16 scr_readw(volatile const u16 *addr)
{
	if (__is_ioaddr(addr))
		return __raw_readw((volatile const u16 __iomem *) addr);
	else
		return *addr;
}
/* Fill 'count' bytes of screen memory with word 'c', choosing the
   MMIO or plain-memory fill depending on where the buffer lives. */
extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
	if (__is_ioaddr(s))
		memsetw_io((u16 __iomem *) s, c, count);
	else
		memsetw(s, c, count);
}
...
...
@@ -43,9 +43,9 @@ extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended
to be used for? */
/* VGA byte accessors: forward to the MMIO byte routines with the
   address cast to an __iomem cookie. */
#define vga_readb(a)	readb((u8 __iomem *)(a))
#define vga_writeb(v,a)	writeb(v, (u8 __iomem *)(a))

#define VGA_MAP_MEM(x)	((unsigned long) ioremap(x, 0))
#endif
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment