Commit 63dd622b, authored Sep 22, 2004 by Richard Henderson

[ALPHA] Implement new ioread interface.

parent 91455b8f
Showing 47 changed files with 1542 additions and 2252 deletions (+1542 / -2252).
arch/alpha/kernel/alpha_ksyms.c      +0    -30
arch/alpha/kernel/core_marvel.c      +141  -86
arch/alpha/kernel/core_titan.c       +32   -18
arch/alpha/kernel/err_titan.c        +1    -1
arch/alpha/kernel/machvec_impl.h     +45   -56
arch/alpha/kernel/pci-noop.c         +12   -0
arch/alpha/kernel/pci.c              +34   -0
arch/alpha/kernel/sys_alcor.c        +2    -4
arch/alpha/kernel/sys_cabriolet.c    +0    -5
arch/alpha/kernel/sys_dp264.c        +0    -5
arch/alpha/kernel/sys_eb64p.c        +0    -2
arch/alpha/kernel/sys_eiger.c        +0    -1
arch/alpha/kernel/sys_jensen.c       +0    -3
arch/alpha/kernel/sys_marvel.c       +0    -1
arch/alpha/kernel/sys_miata.c        +0    -1
arch/alpha/kernel/sys_mikasa.c       +0    -2
arch/alpha/kernel/sys_nautilus.c     +0    -1
arch/alpha/kernel/sys_noritake.c     +0    -2
arch/alpha/kernel/sys_rawhide.c      +0    -1
arch/alpha/kernel/sys_ruffian.c      +0    -1
arch/alpha/kernel/sys_rx164.c        +0    -1
arch/alpha/kernel/sys_sable.c        +0    -3
arch/alpha/kernel/sys_sio.c          +0    -5
arch/alpha/kernel/sys_sx164.c        +0    -1
arch/alpha/kernel/sys_takara.c       +0    -1
arch/alpha/kernel/sys_titan.c        +5    -7
arch/alpha/kernel/sys_wildfire.c     +0    -1
arch/alpha/lib/io.c                  +255  -223
include/asm-alpha/compiler.h         +10   -0
include/asm-alpha/core_apecs.h       +85   -125
include/asm-alpha/core_cia.h         +83   -208
include/asm-alpha/core_irongate.h    +16   -116
include/asm-alpha/core_lca.h         +83   -128
include/asm-alpha/core_marvel.h      +21   -190
include/asm-alpha/core_mcpcia.h      +81   -184
include/asm-alpha/core_polaris.h     +15   -127
include/asm-alpha/core_t2.h          +49   -33
include/asm-alpha/core_titan.h       +14   -130
include/asm-alpha/core_tsunami.h     +16   -133
include/asm-alpha/core_wildfire.h    +16   -133
include/asm-alpha/io.h               +346  -241
include/asm-alpha/io_trivial.h       +127  -0
include/asm-alpha/jensen.h           +40   -33
include/asm-alpha/machvec.h          +10   -8
include/asm-alpha/mmu_context.h      +1    -0
include/asm-alpha/spinlock.h         +1    -1
include/asm-alpha/tlbflush.h         +1    -0
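This commit replaces Alpha's old inb/outb- and readb/writeb-style machine-vector hooks with the generic ioread/iowrite interface (ioread8/16/32, iowrite8/16/32, their _rep string forms, ioport_map, and pci_iomap). A rough driver-side usage sketch, using only interfaces introduced in the diffs below; the device, BAR number, and register offsets are hypothetical:

    #include <linux/pci.h>
    #include <asm/io.h>

    static int example_setup(struct pci_dev *pdev)
    {
        /* pci_iomap() returns an __iomem cookie that works for both
           I/O-port and MMIO BARs; ioread/iowrite dispatch either way.  */
        void __iomem *regs = pci_iomap(pdev, 0, 0);
        if (!regs)
            return -ENOMEM;

        iowrite32(0x1, regs + 0x04);    /* hypothetical control register */
        (void) ioread32(regs + 0x08);   /* hypothetical status register */

        pci_iounmap(pdev, regs);
        return 0;
    }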
arch/alpha/kernel/alpha_ksyms.c
@@ -68,36 +68,6 @@ EXPORT_SYMBOL(alpha_using_srm);
 #endif /* CONFIG_ALPHA_GENERIC */

 /* platform dependent support */
-EXPORT_SYMBOL(_inb);
-EXPORT_SYMBOL(_inw);
-EXPORT_SYMBOL(_inl);
-EXPORT_SYMBOL(_outb);
-EXPORT_SYMBOL(_outw);
-EXPORT_SYMBOL(_outl);
-EXPORT_SYMBOL(_readb);
-EXPORT_SYMBOL(_readw);
-EXPORT_SYMBOL(_readl);
-EXPORT_SYMBOL(_writeb);
-EXPORT_SYMBOL(_writew);
-EXPORT_SYMBOL(_writel);
-EXPORT_SYMBOL(___raw_readb);
-EXPORT_SYMBOL(___raw_readw);
-EXPORT_SYMBOL(___raw_readl);
-EXPORT_SYMBOL(___raw_readq);
-EXPORT_SYMBOL(___raw_writeb);
-EXPORT_SYMBOL(___raw_writew);
-EXPORT_SYMBOL(___raw_writel);
-EXPORT_SYMBOL(___raw_writeq);
-EXPORT_SYMBOL(_memcpy_fromio);
-EXPORT_SYMBOL(_memcpy_toio);
-EXPORT_SYMBOL(_memset_c_io);
-EXPORT_SYMBOL(scr_memcpyw);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
 EXPORT_SYMBOL(strcat);
 EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strcpy);
arch/alpha/kernel/core_marvel.c
@@ -610,11 +610,84 @@ marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
 	csrs->POx_SG_TBIA.csr;
 }

+/*
+ * RTC Support
+ */
+struct marvel_rtc_access_info {
+	unsigned long function;
+	unsigned long index;
+	unsigned long data;
+};
+
+static void
+__marvel_access_rtc(void *info)
+{
+	struct marvel_rtc_access_info *rtc_access = info;
+
+	register unsigned long __r0 __asm__("$0");
+	register unsigned long __r16 __asm__("$16") = rtc_access->function;
+	register unsigned long __r17 __asm__("$17") = rtc_access->index;
+	register unsigned long __r18 __asm__("$18") = rtc_access->data;
+
+	__asm__ __volatile__(
+		"call_pal %4 # cserve rtc"
+		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
+		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
+		: "$1", "$22", "$23", "$24", "$25");
+
+	rtc_access->data = __r0;
+}
+
+static u8
+__marvel_rtc_io(u8 b, unsigned long addr, int write)
+{
+	static u8 index = 0;
+	struct marvel_rtc_access_info rtc_access;
+	u8 ret = 0;
+
+	switch(addr) {
+	case 0x70:					/* RTC_PORT(0) */
+		if (write) index = b;
+		ret = index;
+		break;
+
+	case 0x71:					/* RTC_PORT(1) */
+		rtc_access.index = index;
+		rtc_access.data = BCD_TO_BIN(b);
+		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */
+
+#ifdef CONFIG_SMP
+		if (smp_processor_id() != boot_cpuid)
+			smp_call_function_on_cpu(__marvel_access_rtc,
+						 &rtc_access, 1, 1,
+						 cpumask_of_cpu(boot_cpuid));
+		else
+			__marvel_access_rtc(&rtc_access);
+#else
+		__marvel_access_rtc(&rtc_access);
+#endif
+		ret = BIN_TO_BCD(rtc_access.data);
+		break;
+
+	default:
+		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
+		break;
+	}
+
+	return ret;
+}
+
 /*
  * IO map support.
  */
-unsigned long
+
+#define __marvel_is_mem_vga(a)	(((a) >= 0xa0000) && ((a) <= 0xc0000))
+
+void __iomem *
 marvel_ioremap(unsigned long addr, unsigned long size)
 {
 	struct pci_controller *hose;
@@ -633,8 +706,6 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 	}
 #endif

-	if (!marvel_is_ioaddr(addr))
-		return 0UL;

 	/*
 	 * Find the hose.
 	 */
@@ -643,7 +714,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 			break;
 	}
 	if (!hose)
-		return 0UL;
+		return NULL;

 	/*
 	 * We have the hose - calculate the bus limits.
@@ -655,15 +726,17 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 	 * Is it direct-mapped?
 	 */
 	if ((baddr >= __direct_map_base) &&
-	    ((baddr + size - 1) < __direct_map_base + __direct_map_size))
-		return IDENT_ADDR | (baddr - __direct_map_base);
+	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+		addr = IDENT_ADDR | (baddr - __direct_map_base);
+		return (void __iomem *) addr;
+	}

 	/*
 	 * Check the scatter-gather arena.
 	 */
 	if (hose->sg_pci &&
 	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
-	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){
+	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

 		/*
 		 * Adjust the limits (mappings must be page aligned)
@@ -677,7 +750,9 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 		 * Map it.
 		 */
 		area = get_vm_area(size, VM_IOREMAP);
-		if (!area) return (unsigned long)NULL;
+		if (!area)
+			return NULL;
+
 		ptes = hose->sg_pci->ptes;
 		for (vaddr = (unsigned long)area->addr;
 		    baddr <= last;
@@ -686,7 +761,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 			if (!(pfn & 1)) {
 				printk("ioremap failed... pte not valid...\n");
 				vfree(area->addr);
-				return 0UL;
+				return NULL;
 			}
 			pfn >>= 1;	/* make it a true pfn */
@@ -695,7 +770,7 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 					 PAGE_SIZE, 0)) {
 				printk("FAILED to map...\n");
 				vfree(area->addr);
-				return 0UL;
+				return NULL;
 			}
 		}
@@ -703,101 +778,81 @@ marvel_ioremap(unsigned long addr, unsigned long size)
 		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
-		return vaddr;
+		return (void __iomem *) vaddr;
 	}

 	/*
 	 * Not found - assume legacy ioremap.
 	 */
-	return addr;
+	return NULL;
 }

 void
-marvel_iounmap(unsigned long addr)
+marvel_iounmap(volatile void __iomem *xaddr)
 {
-	if (((long)addr >> 41) == -2)
-		return;	/* kseg map, nothing to do */
-	if (addr)
+	unsigned long addr = (unsigned long) xaddr;
+	if (addr >= VMALLOC_START)
 		vfree((void *)(PAGE_MASK & addr));
 }

-#ifndef CONFIG_ALPHA_GENERIC
-EXPORT_SYMBOL(marvel_ioremap);
-EXPORT_SYMBOL(marvel_iounmap);
-#endif
-
-/*
- * RTC Support
- */
-struct marvel_rtc_access_info {
-	unsigned long function;
-	unsigned long index;
-	unsigned long data;
-};
-
-static void
-__marvel_access_rtc(void *info)
+int
+marvel_is_mmio(const volatile void __iomem *xaddr)
 {
-	struct marvel_rtc_access_info *rtc_access = info;
+	unsigned long addr = (unsigned long) xaddr;

-	register unsigned long __r0 __asm__("$0");
-	register unsigned long __r16 __asm__("$16") = rtc_access->function;
-	register unsigned long __r17 __asm__("$17") = rtc_access->index;
-	register unsigned long __r18 __asm__("$18") = rtc_access->data;
-
-	__asm__ __volatile__(
-		"call_pal %4 # cserve rtc"
-		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
-		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
-		: "$1", "$22", "$23", "$24", "$25");
-
-	rtc_access->data = __r0;
+	if (addr >= VMALLOC_START)
+		return 1;
+	else
+		return (addr & 0xFF000000UL) == 0;
 }

+#define __marvel_is_port_vga(a)	\
+  (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
+#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
+#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))
+
+void __iomem *marvel_ioportmap (unsigned long addr)
+{
+	if (__marvel_is_port_rtc(addr) || __marvel_is_port_kbd(addr))
+		;
+#ifdef CONFIG_VGA_HOSE
+	else if (__marvel_is_port_vga(addr) && pci_vga_hose)
+		addr += pci_vga_hose->io_space->start;
+#endif
+	else
+		return NULL;
+	return (void __iomem *)addr;
+}
+
-u8
-__marvel_rtc_io(int write, u8 b, unsigned long addr)
+unsigned int
+marvel_ioread8(void __iomem *xaddr)
 {
-	struct marvel_rtc_access_info rtc_access = {0, };
-	static u8 index = 0;
-	u8 ret = 0;
-
-	switch(addr) {
-	case 0x70:					/* RTC_PORT(0) */
-		if (write) index = b;
-		ret = index;
-		break;
-
-	case 0x71:					/* RTC_PORT(1) */
-		rtc_access.index = index;
-		rtc_access.data = BCD_TO_BIN(b);
-		rtc_access.function = 0x49;		/* GET_TOY */
-		if (write) rtc_access.function = 0x48;	/* PUT_TOY */
-
-#ifdef CONFIG_SMP
-		if (smp_processor_id() != boot_cpuid)
-			smp_call_function_on_cpu(__marvel_access_rtc,
-						 &rtc_access, 1, /* retry */
-						 1, /* wait */
-						 1UL << boot_cpuid);
-		else
-			__marvel_access_rtc(&rtc_access);
-#else
-		__marvel_access_rtc(&rtc_access);
-#endif
-		ret = BIN_TO_BCD(rtc_access.data);
-		break;
-
-	default:
-		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
-		break;
-	}
-
-	return ret;
+	unsigned long addr = (unsigned long) xaddr;
+	if (__marvel_is_port_kbd(addr))
+		return 0;
+	else if (__marvel_is_port_rtc(addr))
+		return __marvel_rtc_io(0, addr, 0);
+	else
+		return __kernel_ldbu(*(vucp)addr);
 }

+void
+marvel_iowrite8(u8 b, void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (__marvel_is_port_kbd(addr))
+		return;
+	else if (__marvel_is_port_rtc(addr))
+		__marvel_rtc_io(b, addr, 1);
+	else
+		__kernel_stb(b, *(vucp)addr);
+}
+
+#ifndef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(marvel_ioremap);
+EXPORT_SYMBOL(marvel_iounmap);
+EXPORT_SYMBOL(marvel_is_mmio);
+EXPORT_SYMBOL(marvel_ioportmap);
+EXPORT_SYMBOL(marvel_ioread8);
+EXPORT_SYMBOL(marvel_iowrite8);
+#endif
+
 /*
  * NUMA Support
arch/alpha/kernel/core_titan.c
@@ -461,7 +461,8 @@ titan_kill_arch(int mode)
 /*
  * IO map support.
  */
-unsigned long
+
+void __iomem *
 titan_ioremap(unsigned long addr, unsigned long size)
 {
 	int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
@@ -487,15 +488,19 @@ titan_ioremap(unsigned long addr, unsigned long size)
 	 * Find the hose.
 	 */
 	for (hose = hose_head; hose; hose = hose->next)
-		if (hose->index == h) break;
-	if (!hose) return (unsigned long)NULL;
+		if (hose->index == h)
+			break;
+	if (!hose)
+		return NULL;

 	/*
 	 * Is it direct-mapped?
 	 */
 	if ((baddr >= __direct_map_base) &&
-	    ((baddr + size - 1) < __direct_map_base + __direct_map_size))
-		return addr - __direct_map_base + TITAN_MEM_BIAS;
+	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+		vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
+		return (void __iomem *) vaddr;
+	}

 	/*
 	 * Check the scatter-gather arena.
@@ -516,7 +521,9 @@ titan_ioremap(unsigned long addr, unsigned long size)
 		 * Map it
 		 */
 		area = get_vm_area(size, VM_IOREMAP);
-		if (!area) return (unsigned long)NULL;
+		if (!area)
+			return NULL;
+
 		ptes = hose->sg_pci->ptes;
 		for (vaddr = (unsigned long)area->addr;
 		    baddr <= last;
@@ -525,7 +532,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
 			if (!(pfn & 1)) {
 				printk("ioremap failed... pte not valid...\n");
 				vfree(area->addr);
-				return (unsigned long)NULL;
+				return NULL;
 			}
 			pfn >>= 1;	/* make it a true pfn */
@@ -534,35 +541,42 @@ titan_ioremap(unsigned long addr, unsigned long size)
 					 PAGE_SIZE, 0)) {
 				printk("FAILED to map...\n");
 				vfree(area->addr);
-				return (unsigned long)NULL;
+				return NULL;
 			}
 		}

 		flush_tlb_all();

 		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
-		return vaddr;
+		return (void __iomem *) vaddr;
 	}

 	/*
 	 * Not found - assume legacy ioremap.
 	 */
-	return addr + TITAN_MEM_BIAS;
+	return NULL;
 }

 void
-titan_iounmap(unsigned long addr)
+titan_iounmap(volatile void __iomem *xaddr)
 {
-	if (((long)addr >> 41) == -2)
-		return;	/* kseg map, nothing to do */
-	if (addr)
+	unsigned long addr = (unsigned long) xaddr;
+
+	if (addr >= VMALLOC_START)
 		vfree((void *)(PAGE_MASK & addr));
 }

+int
+titan_is_mmio(const volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+
+	if (addr >= VMALLOC_START)
+		return 1;
+	else
+		return (addr & 0x100000000UL) == 0;
+}
+
 #ifndef CONFIG_ALPHA_GENERIC
 EXPORT_SYMBOL(titan_ioremap);
 EXPORT_SYMBOL(titan_iounmap);
+EXPORT_SYMBOL(titan_is_mmio);
 #endif

 /*
arch/alpha/kernel/err_titan.c
@@ -177,7 +177,7 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
 #define TITAN__PCHIP_PERROR__CMD__S	(52)
 #define TITAN__PCHIP_PERROR__CMD__M	(0x0f)
 #define TITAN__PCHIP_PERROR__ADDR__S	(14)
-#define TITAN__PCHIP_PERROR__ADDR__M	(0x1ffffffff)
+#define TITAN__PCHIP_PERROR__ADDR__M	(0x1fffffffful)

 	if (!(perror & TITAN__PCHIP_PERROR__ERRMASK))
 		return MCHK_DISPOSITION_UNKNOWN_ERROR;
arch/alpha/kernel/machvec_impl.h
@@ -44,56 +44,60 @@
 #define DO_DEFAULT_RTC rtc_port: 0x70

 #define DO_EV4_MMU \
-	max_asn: EV4_MAX_ASN, \
-	mv_switch_mm: ev4_switch_mm, \
-	mv_activate_mm: ev4_activate_mm, \
-	mv_flush_tlb_current: ev4_flush_tlb_current, \
-	mv_flush_tlb_current_page: ev4_flush_tlb_current_page
+	.max_asn = EV4_MAX_ASN, \
+	.mv_switch_mm = ev4_switch_mm, \
+	.mv_activate_mm = ev4_activate_mm, \
+	.mv_flush_tlb_current = ev4_flush_tlb_current, \
+	.mv_flush_tlb_current_page = ev4_flush_tlb_current_page

 #define DO_EV5_MMU \
-	max_asn: EV5_MAX_ASN, \
-	mv_switch_mm: ev5_switch_mm, \
-	mv_activate_mm: ev5_activate_mm, \
-	mv_flush_tlb_current: ev5_flush_tlb_current, \
-	mv_flush_tlb_current_page: ev5_flush_tlb_current_page
+	.max_asn = EV5_MAX_ASN, \
+	.mv_switch_mm = ev5_switch_mm, \
+	.mv_activate_mm = ev5_activate_mm, \
+	.mv_flush_tlb_current = ev5_flush_tlb_current, \
+	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page

 #define DO_EV6_MMU \
-	max_asn: EV6_MAX_ASN, \
-	mv_switch_mm: ev5_switch_mm, \
-	mv_activate_mm: ev5_activate_mm, \
-	mv_flush_tlb_current: ev5_flush_tlb_current, \
-	mv_flush_tlb_current_page: ev5_flush_tlb_current_page
+	.max_asn = EV6_MAX_ASN, \
+	.mv_switch_mm = ev5_switch_mm, \
+	.mv_activate_mm = ev5_activate_mm, \
+	.mv_flush_tlb_current = ev5_flush_tlb_current, \
+	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page

 #define DO_EV7_MMU \
-	max_asn: EV6_MAX_ASN, \
-	mv_switch_mm: ev5_switch_mm, \
-	mv_activate_mm: ev5_activate_mm, \
-	mv_flush_tlb_current: ev5_flush_tlb_current, \
-	mv_flush_tlb_current_page: ev5_flush_tlb_current_page
+	.max_asn = EV6_MAX_ASN, \
+	.mv_switch_mm = ev5_switch_mm, \
+	.mv_activate_mm = ev5_activate_mm, \
+	.mv_flush_tlb_current = ev5_flush_tlb_current, \
+	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page

 #define IO_LITE(UP,low) \
-	hae_register: (unsigned long *) CAT(UP,_HAE_ADDRESS), \
-	iack_sc: CAT(UP,_IACK_SC), \
-	mv_inb: CAT(low,_inb), \
-	mv_inw: CAT(low,_inw), \
-	mv_inl: CAT(low,_inl), \
-	mv_outb: CAT(low,_outb), \
-	mv_outw: CAT(low,_outw), \
-	mv_outl: CAT(low,_outl), \
-	mv_readb: CAT(low,_readb), \
-	mv_readw: CAT(low,_readw), \
-	mv_readl: CAT(low,_readl), \
-	mv_readq: CAT(low,_readq), \
-	mv_writeb: CAT(low,_writeb), \
-	mv_writew: CAT(low,_writew), \
-	mv_writel: CAT(low,_writel), \
-	mv_writeq: CAT(low,_writeq), \
-	mv_ioremap: CAT(low,_ioremap), \
-	mv_iounmap: CAT(low,_iounmap) \
+	.hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
+	.iack_sc = CAT(UP,_IACK_SC), \
+	.mv_ioread8 = CAT(low,_ioread8), \
+	.mv_ioread16 = CAT(low,_ioread16), \
+	.mv_ioread32 = CAT(low,_ioread32), \
+	.mv_iowrite8 = CAT(low,_iowrite8), \
+	.mv_iowrite16 = CAT(low,_iowrite16), \
+	.mv_iowrite32 = CAT(low,_iowrite32), \
+	.mv_readb = CAT(low,_readb), \
+	.mv_readw = CAT(low,_readw), \
+	.mv_readl = CAT(low,_readl), \
+	.mv_readq = CAT(low,_readq), \
+	.mv_writeb = CAT(low,_writeb), \
+	.mv_writew = CAT(low,_writew), \
+	.mv_writel = CAT(low,_writel), \
+	.mv_writeq = CAT(low,_writeq), \
+	.mv_ioportmap = CAT(low,_ioportmap), \
+	.mv_ioremap = CAT(low,_ioremap), \
+	.mv_iounmap = CAT(low,_iounmap), \
+	.mv_is_ioaddr = CAT(low,_is_ioaddr), \
+	.mv_is_mmio = CAT(low,_is_mmio) \

 #define IO(UP,low) \
 	IO_LITE(UP,low), \
-	pci_ops: &CAT(low,_pci_ops)
+	.pci_ops = &CAT(low,_pci_ops), \
+	.mv_pci_tbi = CAT(low,_pci_tbi)

 #define DO_APECS_IO	IO(APECS,apecs)
 #define DO_CIA_IO	IO(CIA,cia)
@@ -108,23 +112,8 @@
 #define DO_WILDFIRE_IO	IO(WILDFIRE,wildfire)

 #define DO_PYXIS_IO	IO_LITE(CIA,cia_bwx), \
-			pci_ops: &CAT(cia,_pci_ops)
-
-#define BUS(which) \
-	mv_is_ioaddr: CAT(which,_is_ioaddr), \
-	mv_pci_tbi: CAT(which,_pci_tbi)
-
-#define DO_APECS_BUS	BUS(apecs)
-#define DO_CIA_BUS	BUS(cia)
-#define DO_IRONGATE_BUS	BUS(irongate)
-#define DO_LCA_BUS	BUS(lca)
-#define DO_MARVEL_BUS	BUS(marvel)
-#define DO_MCPCIA_BUS	BUS(mcpcia)
-#define DO_POLARIS_BUS	BUS(polaris)
-#define DO_T2_BUS	BUS(t2)
-#define DO_TSUNAMI_BUS	BUS(tsunami)
-#define DO_TITAN_BUS	BUS(titan)
-#define DO_WILDFIRE_BUS	BUS(wildfire)
+			.pci_ops = &cia_pci_ops, \
+			.mv_pci_tbi = cia_pci_tbi

 /*
  * In a GENERIC kernel, we have lots of these vectors floating about,
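The machine-vector macros above also switch from the GNU-style "label:" initializer extension to standard C99 designated initializers. A minimal generic illustration of the two notations (not code from this commit):

    struct point { int x, y; };

    struct point a = { x: 1, y: 2 };      /* old GNU-style labels */
    struct point b = { .x = 1, .y = 2 };  /* C99 designated initializers */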
arch/alpha/kernel/pci-noop.c
@@ -200,3 +200,15 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
arch/alpha/kernel/pci.c
@@ -531,3 +531,37 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
 	return -EOPNOTSUPP;
 }
+
+/* Create an __iomem token from a PCI BAR.  Copied from lib/iomap.c with
+   no changes, since we don't want the other things in that object file.  */
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		/* Not checking IORESOURCE_CACHEABLE because alpha does
+		   not distinguish between ioremap and ioremap_nocache.  */
+		return ioremap(start, len);
+	}
+	return NULL;
+}
+
+/* Destroy that token.  Not copied from lib/iomap.c.  */
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	if (__is_mmio(addr))
+		iounmap(addr);
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
arch/alpha/kernel/sys_alcor.c
@@ -274,7 +274,6 @@ struct alpha_machine_vector alcor_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
@@ -302,7 +301,6 @@ struct alpha_machine_vector xlt_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,

arch/alpha/kernel/sys_cabriolet.c
@@ -327,7 +327,6 @@ struct alpha_machine_vector cabriolet_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -354,7 +353,6 @@ struct alpha_machine_vector eb164_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -380,7 +378,6 @@ struct alpha_machine_vector eb66p_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_LCA_IO,
-	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -405,7 +402,6 @@ struct alpha_machine_vector lx164_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_PYXIS_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -432,7 +428,6 @@ struct alpha_machine_vector pc164_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_dp264.c
@@ -569,7 +569,6 @@ struct alpha_machine_vector dp264_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -594,7 +593,6 @@ struct alpha_machine_vector monet_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -618,7 +616,6 @@ struct alpha_machine_vector webbrick_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -642,7 +639,6 @@ struct alpha_machine_vector clipper_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -671,7 +667,6 @@ struct alpha_machine_vector shark_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_eb64p.c
@@ -212,7 +212,6 @@ struct alpha_machine_vector eb64p_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -238,7 +237,6 @@ struct alpha_machine_vector eb66_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_LCA_IO,
-	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_eiger.c
@@ -222,7 +222,6 @@ struct alpha_machine_vector eiger_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TSUNAMI_IO,
-	DO_TSUNAMI_BUS,
 	.machine_check = tsunami_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_jensen.c
@@ -249,8 +249,6 @@ jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
 	printk(KERN_CRIT "Machine check\n");
 }

-#define jensen_pci_tbi ((void*)0)
-
 /*
  * The System Vector
@@ -260,7 +258,6 @@ struct alpha_machine_vector jensen_mv __initmv = {
 	.vector_name = "Jensen",
 	DO_EV4_MMU,
 	IO_LITE(JENSEN,jensen),
-	BUS(jensen),
 	.machine_check = jensen_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.rtc_port = 0x170,

arch/alpha/kernel/sys_marvel.c
@@ -471,7 +471,6 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
 	DO_EV7_MMU,
 	DO_DEFAULT_RTC,
 	DO_MARVEL_IO,
-	DO_MARVEL_BUS,
 	.machine_check = marvel_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_miata.c
@@ -269,7 +269,6 @@ struct alpha_machine_vector miata_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_PYXIS_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_mikasa.c
@@ -221,7 +221,6 @@ struct alpha_machine_vector mikasa_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	DO_APECS_BUS,
 	.machine_check = mikasa_apecs_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -246,7 +245,6 @@ struct alpha_machine_vector mikasa_primo_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_nautilus.c
@@ -250,7 +250,6 @@ struct alpha_machine_vector nautilus_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_IRONGATE_IO,
-	DO_IRONGATE_BUS,
 	.machine_check = nautilus_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_noritake.c
@@ -303,7 +303,6 @@ struct alpha_machine_vector noritake_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	DO_APECS_BUS,
 	.machine_check = noritake_apecs_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
@@ -328,7 +327,6 @@ struct alpha_machine_vector noritake_primo_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,

arch/alpha/kernel/sys_rawhide.c
@@ -250,7 +250,6 @@ struct alpha_machine_vector rawhide_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_MCPCIA_IO,
-	DO_MCPCIA_BUS,
 	.machine_check = mcpcia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_ruffian.c
@@ -220,7 +220,6 @@ struct alpha_machine_vector ruffian_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_PYXIS_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_rx164.c
@@ -201,7 +201,6 @@ struct alpha_machine_vector rx164_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_POLARIS_IO,
-	DO_POLARIS_BUS,
 	.machine_check = polaris_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_sable.c
@@ -566,7 +566,6 @@ struct alpha_machine_vector sable_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_T2_IO,
-	DO_T2_BUS,
 	.machine_check = t2_machine_check,
 	.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
@@ -599,7 +598,6 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_T2_IO,
-	DO_T2_BUS,
 	.machine_check = t2_machine_check,
 	.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,
@@ -631,7 +629,6 @@ struct alpha_machine_vector lynx_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_T2_IO,
-	DO_T2_BUS,
 	.machine_check = t2_machine_check,
 	.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = EISA_DEFAULT_IO_BASE,

arch/alpha/kernel/sys_sio.c
@@ -288,7 +288,6 @@ struct alpha_machine_vector alphabook1_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_LCA_IO,
-	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -319,7 +318,6 @@ struct alpha_machine_vector avanti_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	DO_APECS_BUS,
 	.machine_check = apecs_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -349,7 +347,6 @@ struct alpha_machine_vector noname_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_LCA_IO,
-	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -388,7 +385,6 @@ struct alpha_machine_vector p2k_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_LCA_IO,
-	DO_LCA_BUS,
 	.machine_check = lca_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -418,7 +414,6 @@ struct alpha_machine_vector xl_mv __initmv = {
 	DO_EV4_MMU,
 	DO_DEFAULT_RTC,
 	DO_APECS_IO,
-	BUS(apecs),
 	.machine_check = apecs_machine_check,
 	.max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_sx164.c
@@ -158,7 +158,6 @@ struct alpha_machine_vector sx164_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_PYXIS_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_takara.c
@@ -277,7 +277,6 @@ struct alpha_machine_vector takara_mv __initmv = {
 	DO_EV5_MMU,
 	DO_DEFAULT_RTC,
 	DO_CIA_IO,
-	DO_CIA_BUS,
 	.machine_check = cia_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_titan.c
@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
 	register int bcpu = boot_cpuid;

 #ifdef CONFIG_SMP
-	register unsigned long cpm = cpu_present_mask;
+	cpumask_t cpm = cpu_present_mask;
 	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
 	unsigned long mask0, mask1, mask2, mask3, dummy;
@@ -85,10 +85,10 @@ titan_update_irq_hw(unsigned long mask)
 	dim1 = &cchip->dim1.csr;
 	dim2 = &cchip->dim2.csr;
 	dim3 = &cchip->dim3.csr;
-	if ((cpm & 1) == 0) dim0 = &dummy;
-	if ((cpm & 2) == 0) dim1 = &dummy;
-	if ((cpm & 4) == 0) dim2 = &dummy;
-	if ((cpm & 8) == 0) dim3 = &dummy;
+	if (!cpu_isset(0, cpm)) dim0 = &dummy;
+	if (!cpu_isset(1, cpm)) dim1 = &dummy;
+	if (!cpu_isset(2, cpm)) dim2 = &dummy;
+	if (!cpu_isset(3, cpm)) dim3 = &dummy;
 	*dim0 = mask0;
 	*dim1 = mask1;
@@ -369,7 +369,6 @@ struct alpha_machine_vector titan_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TITAN_IO,
-	DO_TITAN_BUS,
 	.machine_check = titan_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
@@ -397,7 +396,6 @@ struct alpha_machine_vector privateer_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_TITAN_IO,
-	DO_TITAN_BUS,
 	.machine_check = privateer_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,

arch/alpha/kernel/sys_wildfire.c
@@ -337,7 +337,6 @@ struct alpha_machine_vector wildfire_mv __initmv = {
 	DO_EV6_MMU,
 	DO_DEFAULT_RTC,
 	DO_WILDFIRE_IO,
-	DO_WILDFIRE_BUS,
 	.machine_check = wildfire_machine_check,
 	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
 	.min_io_address = DEFAULT_IO_BASE,
arch/alpha/lib/io.c
@@ -6,164 +6,246 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/module.h>
 #include <asm/io.h>

-u8
-_inb(unsigned long addr)
+/* Out-of-line versions of the i/o routines that redirect into the
+   platform-specific version.  Note that "platform-specific" may mean
+   "generic", which bumps through the machine vector.  */
+
+unsigned int
+ioread8(void __iomem *addr)
 {
-	return __inb(addr);
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+	mb();
+	return ret;
 }

-u16
-_inw(unsigned long addr)
+unsigned int
+ioread16(void __iomem *addr)
 {
-	return __inw(addr);
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+	mb();
+	return ret;
 }

-u32
-_inl(unsigned long addr)
+unsigned int
+ioread32(void __iomem *addr)
 {
-	return __inl(addr);
+	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+	mb();
+	return ret;
 }

+void
+iowrite8(u8 b, void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
+	mb();
+}
+
 void
-_outb(u8 b, unsigned long addr)
+iowrite16(u16 b, void __iomem *addr)
 {
-	__outb(b, addr);
+	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
+	mb();
 }

 void
-_outw(u16 b, unsigned long addr)
+iowrite32(u32 b, void __iomem *addr)
 {
-	__outw(b, addr);
+	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
+	mb();
 }

-void
-_outl(u32 b, unsigned long addr)
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+u8
+inb(unsigned long port)
 {
-	__outl(b, addr);
+	return ioread8(ioport_map(port, 1));
 }

-u8
-___raw_readb(const volatile void __iomem *addr)
+u16
+inw(unsigned long port)
 {
-	return __readb(addr);
+	return ioread16(ioport_map(port, 2));
 }

-u16
-___raw_readw(const volatile void __iomem *addr)
+u32
+inl(unsigned long port)
 {
-	return __readw(addr);
+	return ioread32(ioport_map(port, 4));
 }

-u32
-___raw_readl(const volatile void __iomem *addr)
+void
+outb(u8 b, unsigned long port)
 {
-	return __readl(addr);
+	iowrite8(b, ioport_map(port, 1));
 }

-u64
-___raw_readq(const volatile void __iomem *addr)
+void
+outw(u16 b, unsigned long port)
 {
-	return __readq(addr);
+	iowrite16(b, ioport_map(port, 2));
 }

-u8
-_readb(const volatile void __iomem *addr)
+void
+outl(u32 b, unsigned long port)
 {
-	unsigned long r = __readb(addr);
-	mb();
-	return r;
+	iowrite32(b, ioport_map(port, 4));
 }

-u16
-_readw(const volatile void __iomem *addr)
+EXPORT_SYMBOL(inb);
+EXPORT_SYMBOL(inw);
+EXPORT_SYMBOL(inl);
+EXPORT_SYMBOL(outb);
+EXPORT_SYMBOL(outw);
+EXPORT_SYMBOL(outl);
+
+u8
+__raw_readb(const volatile void __iomem *addr)
 {
-	unsigned long r = __readw(addr);
-	mb();
-	return r;
+	return IO_CONCAT(__IO_PREFIX,readb)(addr);
 }

-u32
-_readl(const volatile void __iomem *addr)
+u16
+__raw_readw(const volatile void __iomem *addr)
 {
-	unsigned long r = __readl(addr);
-	mb();
-	return r;
+	return IO_CONCAT(__IO_PREFIX,readw)(addr);
 }

-u64
-_readq(const volatile void __iomem *addr)
+u32
+__raw_readl(const volatile void __iomem *addr)
 {
-	unsigned long r = __readq(addr);
-	mb();
-	return r;
+	return IO_CONCAT(__IO_PREFIX,readl)(addr);
 }

-void
-___raw_writeb(u8 b, volatile void __iomem *addr)
+u64
+__raw_readq(const volatile void __iomem *addr)
 {
-	__writeb(b, addr);
+	return IO_CONCAT(__IO_PREFIX,readq)(addr);
 }

-void
-___raw_writew(u16 b, volatile void __iomem *addr)
+void
+__raw_writeb(u8 b, volatile void __iomem *addr)
 {
-	__writew(b, addr);
+	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
 }

-void
-___raw_writel(u32 b, volatile void __iomem *addr)
+void
+__raw_writew(u16 b, volatile void __iomem *addr)
 {
-	__writel(b, addr);
+	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
 }

+void
+__raw_writel(u32 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
+}
+
+void
+__raw_writeq(u64 b, volatile void __iomem *addr)
+{
+	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
+}
+
+EXPORT_SYMBOL(__raw_readb);
+EXPORT_SYMBOL(__raw_readw);
+EXPORT_SYMBOL(__raw_readl);
+EXPORT_SYMBOL(__raw_readq);
+EXPORT_SYMBOL(__raw_writeb);
+EXPORT_SYMBOL(__raw_writew);
+EXPORT_SYMBOL(__raw_writel);
+EXPORT_SYMBOL(__raw_writeq);
+
+u8
+readb(const volatile void __iomem *addr)
+{
+	u8 ret = __raw_readb(addr);
+	mb();
+	return ret;
+}
+
+u16
+readw(const volatile void __iomem *addr)
+{
+	u16 ret = __raw_readw(addr);
+	mb();
+	return ret;
+}
+
+u32
+readl(const volatile void __iomem *addr)
+{
+	u32 ret = __raw_readl(addr);
+	mb();
+	return ret;
+}
+
-void
-___raw_writeq(u64 b, volatile void __iomem *addr)
+u64
+readq(const volatile void __iomem *addr)
 {
-	__writeq(b, addr);
+	u64 ret = __raw_readq(addr);
+	mb();
+	return ret;
 }

-void
-_writeb(u8 b, volatile void __iomem *addr)
+void
+writeb(u8 b, volatile void __iomem *addr)
 {
-	__writeb(b, addr);
+	__raw_writeb(b, addr);
 	mb();
 }

-void
-_writew(u16 b, volatile void __iomem *addr)
+void
+writew(u16 b, volatile void __iomem *addr)
 {
-	__writew(b, addr);
+	__raw_writew(b, addr);
 	mb();
 }

-void
-_writel(u32 b, volatile void __iomem *addr)
+void
+writel(u32 b, volatile void __iomem *addr)
 {
-	__writel(b, addr);
+	__raw_writel(b, addr);
 	mb();
 }

-void
-_writeq(u64 b, volatile void __iomem *addr)
+void
+writeq(u64 b, volatile void __iomem *addr)
 {
-	__writeq(b, addr);
+	__raw_writeq(b, addr);
 	mb();
 }

+EXPORT_SYMBOL(readb);
+EXPORT_SYMBOL(readw);
+EXPORT_SYMBOL(readl);
+EXPORT_SYMBOL(readq);
+EXPORT_SYMBOL(writeb);
+EXPORT_SYMBOL(writew);
+EXPORT_SYMBOL(writel);
+EXPORT_SYMBOL(writeq);
+
 /*
- * Read COUNT 8-bit bytes from port PORT into memory starting at
- * SRC.
+ * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
  */
-void insb (unsigned long port, void *dst, unsigned long count)
+void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
 {
-	while (((unsigned long)dst) & 0x3) {
+	while ((unsigned long)dst & 0x3) {
 		if (!count)
 			return;
 		count--;
-		*(unsigned char *)dst = inb(port);
+		*(unsigned char *)dst = ioread8(port);
 		dst += 1;
 	}

 	while (count >= 4) {
 		unsigned int w;
 		count -= 4;
-		w = inb(port);
-		w |= inb(port) << 8;
-		w |= inb(port) << 16;
-		w |= inb(port) << 24;
-		*(unsigned int *)dst = w;
+		w = ioread8(port);
+		w |= ioread8(port) << 8;
+		w |= ioread8(port) << 16;
+		w |= ioread8(port) << 24;
+		*(unsigned int *)dst = w;
 		dst += 4;
 	}

 	while (count) {
 		--count;
-		*(unsigned char *)dst = inb(port);
+		*(unsigned char *)dst = ioread8(port);
 		dst += 1;
 	}
 }

+void insb(unsigned long port, void *dst, unsigned long count)
+{
+	ioread8_rep(ioport_map(port, 1), dst, count);
+}
+
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(insb);
+
 /*
  * Read COUNT 16-bit words from port PORT into memory starting at
@@ -172,33 +254,39 @@ void insb (unsigned long port, void *dst, unsigned long count)
  * the interfaces seems to be slow: just using the inlined version
  * of the inw() breaks things.
  */
-void insw (unsigned long port, void *dst, unsigned long count)
+void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
 {
-	if (((unsigned long)dst) & 0x3) {
-		if (((unsigned long)dst) & 0x1) {
-			panic ("insw: memory not short aligned");
-		}
+	if (unlikely((unsigned long)dst & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)dst & 0x1);
 		count--;
-		*(unsigned short *)dst = inw(port);
+		*(unsigned short *)dst = ioread16(port);
 		dst += 2;
 	}

 	while (count >= 2) {
 		unsigned int w;
 		count -= 2;
-		w = inw(port);
-		w |= inw(port) << 16;
-		*(unsigned int *)dst = w;
+		w = ioread16(port);
+		w |= ioread16(port) << 16;
+		*(unsigned int *)dst = w;
 		dst += 4;
 	}

 	if (count) {
-		*(unsigned short*)dst = inw(port);
+		*(unsigned short*)dst = ioread16(port);
 	}
 }

+void insw(unsigned long port, void *dst, unsigned long count)
+{
+	ioread16_rep(ioport_map(port, 2), dst, count);
+}
+
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(insw);
+
 /*
  * Read COUNT 32-bit words from port PORT into memory starting at
@@ -206,80 +294,31 @@ void insw (unsigned long port, void *dst, unsigned long count)
  * but the interfaces seems to be slow: just using the inlined version
  * of the inl() breaks things.
  */
-void insl (unsigned long port, void *dst, unsigned long count)
+void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
 {
-	unsigned int l = 0, l2;
-
-	if (!count)
-		return;
-
-	switch (((unsigned long) dst) & 0x3)
-	{
-	 case 0x00:			/* Buffer 32-bit aligned */
-		while (count--)
-		{
-			*(unsigned int *) dst = inl(port);
-			dst += 4;
-		}
-		break;
-
-	/* Assuming little endian Alphas in cases 0x01 -- 0x03 ... */
-
-	 case 0x02:			/* Buffer 16-bit aligned */
-		--count;
-		l = inl(port);
-		*(unsigned short *) dst = l;
-		dst += 2;
-		while (count--)
-		{
-			l2 = inl(port);
-			*(unsigned int *) dst = l >> 16 | l2 << 16;
-			dst += 4;
-			l = l2;
-		}
-		*(unsigned short *) dst = l >> 16;
-		break;
-
-	 case 0x01:			/* Buffer 8-bit aligned */
-		--count;
-		l = inl(port);
-		*(unsigned char *) dst = l;
-		dst += 1;
-		*(unsigned short *) dst = l >> 8;
-		dst += 2;
-		while (count--)
-		{
-			l2 = inl(port);
-			*(unsigned int *) dst = l >> 24 | l2 << 8;
-			dst += 4;
-			l = l2;
-		}
-		*(unsigned char *) dst = l >> 24;
-		break;
-
-	 case 0x03:			/* Buffer 8-bit aligned */
-		--count;
-		l = inl(port);
-		*(unsigned char *) dst = l;
-		dst += 1;
-		while (count--)
-		{
-			l2 = inl(port);
-			*(unsigned int *) dst = l << 24 | l2 >> 8;
-			dst += 4;
-			l = l2;
-		}
-		*(unsigned short *) dst = l >> 8;
-		dst += 2;
-		*(unsigned char *) dst = l >> 24;
-		break;
-	}
+	if (unlikely((unsigned long)dst & 0x3)) {
+		while (count--) {
+			struct S { int x __attribute__((packed)); };
+			((struct S *)dst)->x = ioread32(port);
+			dst += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned.  */
+		while (count--) {
+			*(unsigned int *)dst = ioread32(port);
+			dst += 4;
+		}
+	}
 }

+void insl(unsigned long port, void *dst, unsigned long count)
+{
+	ioread32_rep(ioport_map(port, 4), dst, count);
+}
+
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(insl);
+
 /*
  * Like insb but in the opposite direction.
@@ -287,28 +326,35 @@ void insl (unsigned long port, void *dst, unsigned long count)
  * doing byte reads the "slow" way isn't nearly as slow as
  * doing byte writes the slow way (no r-m-w cycle).
  */
-void outsb (unsigned long port, const void *src, unsigned long count)
+void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
 {
-	while (count) {
-		count--;
-		outb(*(char *)src, port);
-		src += 1;
-	}
+	const unsigned char *src = xsrc;
+	while (count--)
+		iowrite8(*src++, port);
 }

+void outsb(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite8_rep(ioport_map(port, 1), src, count);
+}
+
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(outsb);
+
 /*
  * Like insw but in the opposite direction.  This is used by the IDE
  * driver to write disk sectors.  Performance is important, but the
  * interfaces seems to be slow: just using the inlined version of the
  * outw() breaks things.
  */
-void outsw (unsigned long port, const void *src, unsigned long count)
+void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
 {
-	if ( ((unsigned long)src) & 0x3 ) {
-		if ( ((unsigned long)src) & 0x1 ) {
-			panic("outsw: memory not short aligned");
-		}
-		outw(*(unsigned short *)src, port);
+	if (unlikely((unsigned long)src & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)src & 0x1);
+		iowrite16(*(unsigned short *)src, port);
 		src += 2;
 		--count;
 	}
@@ -316,17 +362,25 @@ void outsw (unsigned long port, const void *src, unsigned long count)
 	while (count >= 2) {
 		unsigned int w;
 		count -= 2;
 		w = *(unsigned int *)src;
 		src += 4;
-		outw(w >>  0, port);
-		outw(w >> 16, port);
+		iowrite16(w >>  0, port);
+		iowrite16(w >> 16, port);
 	}

 	if (count) {
-		outw(*(unsigned short *)src, port);
+		iowrite16(*(unsigned short *)src, port);
 	}
 }

+void outsw(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite16_rep(ioport_map(port, 2), src, count);
+}
+
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(outsw);
+
 /*
  * Like insl but in the opposite direction.  This is used by the IDE
@@ -334,84 +388,37 @@ void outsw (unsigned long port, const void *src, unsigned long count)
  * Performance is important, but the interfaces seems to be slow:
  * just using the inlined version of the outl() breaks things.
  */
-void outsl (unsigned long port, const void *src, unsigned long count)
+void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
 {
-	unsigned int l = 0, l2;
-
-	if (!count)
-		return;
-
-	switch (((unsigned long) src) & 0x3)
-	{
-	 case 0x00:			/* Buffer 32-bit aligned */
-		while (count--)
-		{
-			outl(*(unsigned int *) src, port);
-			src += 4;
-		}
-		break;
-
-	 case 0x02:			/* Buffer 16-bit aligned */
-		--count;
-		l = *(unsigned short *) src << 16;
-		src += 2;
-		while (count--)
-		{
-			l2 = *(unsigned int *) src;
-			src += 4;
-			outl (l >> 16 | l2 << 16, port);
-			l = l2;
-		}
-		l2 = *(unsigned short *) src;
-		outl (l >> 16 | l2 << 16, port);
-		break;
-
-	 case 0x01:			/* Buffer 8-bit aligned */
-		--count;
-		l = *(unsigned char *) src << 8;
-		src += 1;
-		l |= *(unsigned short *) src << 16;
-		src += 2;
-		while (count--)
-		{
-			l2 = *(unsigned int *) src;
-			src += 4;
-			outl (l >> 8 | l2 << 24, port);
-			l = l2;
-		}
-		l2 = *(unsigned char *) src;
-		outl (l >> 8 | l2 << 24, port);
-		break;
-
-	 case 0x03:			/* Buffer 8-bit aligned */
-		--count;
-		l = *(unsigned char *) src << 24;
-		src += 1;
-		while (count--)
-		{
-			l2 = *(unsigned int *) src;
-			src += 4;
-			outl (l >> 24 | l2 << 8, port);
-			l = l2;
-		}
-		l2 = *(unsigned short *) src;
-		src += 2;
-		l2 |= *(unsigned char *) src << 16;
-		outl (l >> 24 | l2 << 8, port);
-		break;
-	}
+	if (unlikely((unsigned long)src & 0x3)) {
+		while (count--) {
+			struct S { int x __attribute__((packed)); };
+			iowrite32(((struct S *)src)->x, port);
+			src += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned.  */
+		while (count--) {
+			iowrite32(*(unsigned int *)src, port);
+			src += 4;
+		}
+	}
 }

+void outsl(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite32_rep(ioport_map(port, 4), src, count);
+}
+
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(outsl);
+
 /*
  * Copy data from IO memory space to "real" memory space.
  * This needs to be optimized.
  */
-void _memcpy_fromio(void * to, const volatile void __iomem *from, long count)
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
 {
 	/* Optimize co-aligned transfers.  Everything else gets handled
 	   a byte at a time. */
@@ -458,11 +465,14 @@ void _memcpy_fromio(void * to, const volatile void __iomem *from, long count)
 	mb();
 }

+EXPORT_SYMBOL(memcpy_fromio);
+
 /*
  * Copy data from "real" memory space to IO memory space.
  * This needs to be optimized.
  */
-void _memcpy_toio(volatile void __iomem *to, const void * from, long count)
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
 {
 	/* Optimize co-aligned transfers.  Everything else gets handled
 	   a byte at a time. */
@@ -510,6 +520,9 @@ void _memcpy_toio(volatile void __iomem *to, const void * from, long count)
 	mb();
 }

+EXPORT_SYMBOL(memcpy_toio);
+
 /*
  * "memset" on IO memory space.
  */
@@ -569,6 +582,11 @@ void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
 	mb();
 }

+EXPORT_SYMBOL(_memset_c_io);
+
 /* A version of memcpy used by the vga console routines to move data around
    arbitrarily between screen and main memory.  */

 void
 scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
 {
@@ -597,3 +615,17 @@ scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
 		memcpy(d, s, count);
 	}
 }

+EXPORT_SYMBOL(scr_memcpyw);
+
+void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
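The out-of-line routines in lib/io.c reach their platform implementation through IO_CONCAT(__IO_PREFIX,name) token pasting. The following is only a simplified sketch of that idea; the exact macro bodies live in the asm-alpha headers, and the cia example is illustrative rather than copied from this commit:

    /* Simplified sketch, not the literal asm-alpha/io.h text. */
    #define _IO_CONCAT(a,b)  a ## _ ## b
    #define IO_CONCAT(a,b)   _IO_CONCAT(a,b)

    /* With __IO_PREFIX defined to cia, IO_CONCAT(__IO_PREFIX,ioread8)(addr)
       expands to cia_ioread8(addr).  On a CONFIG_ALPHA_GENERIC kernel the
       prefix instead selects a wrapper that dispatches at run time through
       the machine vector (the mv_ioread8 hooks wired up in machvec_impl.h). */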
include/asm-alpha/compiler.h
@@ -90,4 +90,14 @@
 	__asm__("stw %1,%0" : "=m"(mem) : "r"(val))
 #endif

+/* Some idiots over in <linux/compiler.h> thought inline should imply
+   always_inline.  This breaks stuff.  We'll include this file whenever
+   we run into such problems.  */
+
+#include <linux/compiler.h>
+#undef inline
+#undef __inline__
+#undef __inline
+
 #endif /* __ALPHA_COMPILER_H */
include/asm-alpha/core_apecs.h
View file @
63dd622b
...
...
@@ -374,178 +374,138 @@ struct el_apecs_procdata
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE
u8
apecs_inb
(
unsigned
long
addr
)
#define APECS_SET_HAE \
do { \
if (addr >= (1UL << 24)) { \
unsigned long msb = addr & 0xf8000000; \
addr -= msb; \
set_hae(msb); \
} \
} while (0)
__EXTERN_INLINE
unsigned
int
apecs_ioread8
(
void
__iomem
*
xaddr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_IO
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
apecs_outb
(
u8
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x00
)
=
w
;
mb
();
}
__EXTERN_INLINE
u16
apecs_inw
(
unsigned
long
addr
)
{
long
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_IO
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
apecs_outw
(
u16
b
,
unsigned
long
addr
)
{
unsigned
long
w
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x08
)
=
w
;
mb
();
}
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
base_and_type
;
__EXTERN_INLINE
u32
apecs_inl
(
unsigned
long
addr
)
{
return
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x18
);
}
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x00
;
}
__EXTERN_INLINE
void
apecs_outl
(
u32
b
,
unsigned
long
addr
)
{
*
(
vuip
)
((
addr
<<
5
)
+
APECS_IO
+
0x18
)
=
b
;
mb
();
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extbl
(
result
,
addr
&
3
);
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
__EXTERN_INLINE
u8
apecs_readb
(
const
volatile
void
__iomem
*
xaddr
)
__EXTERN_INLINE
void
apecs_iowrite8
(
u8
b
,
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
msb
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x00
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x00
;
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x00
);
return
__kernel_extbl
(
result
,
addr
&
3
);
w
=
__kernel_insbl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
u
16
apecs_readw
(
const
volatile
void
__iomem
*
xaddr
)
__EXTERN_INLINE
u
nsigned
int
apecs_ioread16
(
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
result
,
msb
;
unsigned
long
result
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x08
;
}
result
=
*
(
vip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x08
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
u32
apecs_readl
(
const
volatile
void
__iomem
*
addr
)
{
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
u64
apecs_readq
(
const
volatile
void
__iomem
*
addr
)
{
return
*
(
vulp
)
addr
;
result
=
*
(
vip
)
((
addr
<<
5
)
+
base_and_type
);
return
__kernel_extwl
(
result
,
addr
&
3
);
}
__EXTERN_INLINE
void
apecs_
writeb
(
u8
b
,
volatile
void
__iomem
*
xaddr
)
__EXTERN_INLINE
void
apecs_
iowrite16
(
u16
b
,
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
;
unsigned
long
w
,
base_and_type
;
if
(
addr
>=
APECS_DENSE_MEM
)
{
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
APECS_SET_HAE
;
base_and_type
=
APECS_SPARSE_MEM
+
0x08
;
}
else
{
addr
-=
APECS_IO
;
base_and_type
=
APECS_IO
+
0x08
;
}
*
(
vuip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x00
)
=
b
*
0x01010101
;
w
=
__kernel_inswl
(
b
,
addr
&
3
);
*
(
vuip
)
((
addr
<<
5
)
+
base_and_type
)
=
w
;
}
__EXTERN_INLINE
void
apecs_writew
(
u16
b
,
volatile
void
__iomem
*
xaddr
)
__EXTERN_INLINE
unsigned
int
apecs_ioread32
(
void
__iomem
*
xaddr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
unsigned
long
msb
;
addr
-=
APECS_DENSE_MEM
;
if
(
addr
>=
(
1UL
<<
24
))
{
msb
=
addr
&
0xf8000000
;
addr
-=
msb
;
set_hae
(
msb
);
}
*
(
vuip
)
((
addr
<<
5
)
+
APECS_SPARSE_MEM
+
0x08
)
=
b
*
0x00010001
;
if
(
addr
<
APECS_DENSE_MEM
)
addr
=
((
addr
-
APECS_IO
)
<<
5
)
+
APECS_IO
+
0x18
;
return
*
(
vuip
)
addr
;
}
__EXTERN_INLINE
void
apecs_
writel
(
u32
b
,
volatile
void
__iomem
*
addr
)
__EXTERN_INLINE
void
apecs_
iowrite32
(
u32
b
,
void
__iomem
*
x
addr
)
{
unsigned
long
addr
=
(
unsigned
long
)
xaddr
;
if
(
addr
<
APECS_DENSE_MEM
)
addr
=
((
addr
-
APECS_IO
)
<<
5
)
+
APECS_IO
+
0x18
;
*
(
vuip
)
addr
=
b
;
}
__EXTERN_INLINE
void
apecs_writeq
(
u64
b
,
volatile
void
__iomem
*
addr
)
__EXTERN_INLINE
void
__iomem
*
apecs_ioportmap
(
unsigned
long
addr
)
{
*
(
vulp
)
addr
=
b
;
return
(
void
__iomem
*
)(
addr
+
APECS_IO
)
;
}
__EXTERN_INLINE
void
__iomem
*
apecs_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
unsigned
long
size
)
{
return
(
void
__iomem
*
)(
addr
+
APECS_DENSE_MEM
);
}
__EXTERN_INLINE
void
apecs_iounmap
(
volatile
void
__iomem
*
addr
)
__EXTERN_INLINE
int
apecs_is_ioaddr
(
unsigned
long
addr
)
{
return
;
return
addr
>=
IDENT_ADDR
+
0x180000000UL
;
}
__EXTERN_INLINE
int
apecs_is_
ioaddr
(
unsigned
long
addr
)
__EXTERN_INLINE
int
apecs_is_
mmio
(
const
volatile
void
__iomem
*
addr
)
{
return
addr
>=
IDENT_ADDR
+
0x180000000UL
;
return
(
unsigned
long
)
addr
>=
APECS_DENSE_MEM
;
}
#undef APECS_SET_HAE
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) apecs_inb((unsigned long)(p))
#define __inw(p) apecs_inw((unsigned long)(p))
#define __inl(p) apecs_inl((unsigned long)(p))
#define __outb(x,p) apecs_outb(x,(unsigned long)(p))
#define __outw(x,p) apecs_outw(x,(unsigned long)(p))
#define __outl(x,p) apecs_outl(x,(unsigned long)(p))
#define __readb(a) apecs_readb(a)
#define __readw(a) apecs_readw(a)
#define __readl(a) apecs_readl(a)
#define __readq(a) apecs_readq(a)
#define __writeb(x,a) apecs_writeb(x,a)
#define __writew(x,a) apecs_writew(x,a)
#define __writel(x,a) apecs_writel(x,a)
#define __writeq(x,a) apecs_writeq(x,a)
#define __ioremap(a,s) apecs_ioremap(a,s)
#define __iounmap(a) apecs_iounmap(a)
#define __is_ioaddr(a) apecs_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX apecs
#define apecs_trivial_io_bw 0
#define apecs_trivial_io_lq 0
#define apecs_trivial_rw_bw 2
#define apecs_trivial_rw_lq 1
#define apecs_trivial_iounmap 1
#include <asm/io_trivial.h>
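A rough, standalone illustration (not kernel code) of the sparse-space arithmetic the APECS accessors above use: the bus offset is shifted left by 5, a size field (0x00 byte, 0x08 word, 0x18 longword) is added to the window base, and the addressed lane is then pulled out of the 32-bit result, much as __kernel_extwl does. The window base constant here is invented for the demo.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sparse-space window base, stand-in for APECS_SPARSE_MEM. */
#define DEMO_SPARSE_BASE 0x200000000UL

/* Encode a sparse-space word access: offset << 5 plus the 0x08 size field. */
static uint64_t sparse_word_address(uint64_t offset)
{
	return (offset << 5) + DEMO_SPARSE_BASE + 0x08;
}

/* Pull the addressed 16-bit lane out of a 32-bit sparse read,
   mimicking what __kernel_extwl does with the low address bits. */
static uint16_t extract_word(uint32_t raw, uint64_t offset)
{
	return (uint16_t)(raw >> ((offset & 3) * 8));
}

int main(void)
{
	uint64_t off = 0x3f6;	/* example bus offset */
	printf("cooked address: %#lx\n", (unsigned long)sparse_word_address(off));
	printf("lane value:     %#x\n", extract_word(0xdeadbeef, off));
	return 0;
}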
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_cia.h
...
...
@@ -306,90 +306,6 @@ struct el_CIA_sysdata_mcheck {
* get at PCI memory and I/O.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 cia_inb(unsigned long addr)
{
	long result;
	result = *(vip)((addr << 5) + CIA_IO + 0x00);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE void cia_outb(u8 b, unsigned long addr)
{
	unsigned long w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + CIA_IO + 0x00) = w;
	mb();
}

__EXTERN_INLINE u16 cia_inw(unsigned long addr)
{
	long result;
	result = *(vip)((addr << 5) + CIA_IO + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void cia_outw(u16 b, unsigned long addr)
{
	unsigned long w = __kernel_inswl(b, addr & 3);
	*(vuip)((addr << 5) + CIA_IO + 0x08) = w;
	mb();
}

__EXTERN_INLINE u32 cia_inl(unsigned long addr)
{
	return *(vuip)((addr << 5) + CIA_IO + 0x18);
}

__EXTERN_INLINE void cia_outl(u32 b, unsigned long addr)
{
	*(vuip)((addr << 5) + CIA_IO + 0x18) = b;
	mb();
}

__EXTERN_INLINE u8 cia_bwx_inb(unsigned long addr)
{
	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   CIA BWX I/O region, but that doesn't take care of legacy
	   ISA crap.  */
	return __kernel_ldbu(*(vucp)(addr + CIA_BW_IO));
}

__EXTERN_INLINE void cia_bwx_outb(u8 b, unsigned long addr)
{
	__kernel_stb(b, *(vucp)(addr + CIA_BW_IO));
	mb();
}

__EXTERN_INLINE u16 cia_bwx_inw(unsigned long addr)
{
	return __kernel_ldwu(*(vusp)(addr + CIA_BW_IO));
}

__EXTERN_INLINE void cia_bwx_outw(u16 b, unsigned long addr)
{
	__kernel_stw(b, *(vusp)(addr + CIA_BW_IO));
	mb();
}

__EXTERN_INLINE u32 cia_bwx_inl(unsigned long addr)
{
	return *(vuip)(addr + CIA_BW_IO);
}

__EXTERN_INLINE void cia_bwx_outl(u32 b, unsigned long addr)
{
	*(vuip)(addr + CIA_BW_IO) = b;
	mb();
}

/*
 * Memory functions.  64-bit and 32-bit accesses are done through
 * dense memory space, everything else through sparse space.
...
...
@@ -422,116 +338,112 @@ __EXTERN_INLINE void cia_bwx_outl(u32 b, unsigned long addr)
 *
 */
__EXTERN_INLINE u8 cia_readb(const volatile void __iomem *xaddr)
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result;
	unsigned long result, base_and_type;

	/* We can use CIA_MEM_R1_MASK for io ports too, since it is large
	   enough to cover all io ports, and smaller than CIA_IO. */
	addr &= CIA_MEM_R1_MASK;
	result = *(vip)((addr << 5) + CIA_SPARSE_MEM + 0x00);
	if (addr >= CIA_DENSE_MEM)
		base_and_type = CIA_SPARSE_MEM + 0x00;
	else
		base_and_type = CIA_IO + 0x00;
	result = *(vip)((addr << 5) + base_and_type);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE u16 cia_readw(const volatile void __iomem *xaddr)
__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result;
	unsigned long w, base_and_type;

	addr &= CIA_MEM_R1_MASK;
	result = *(vip)((addr << 5) + CIA_SPARSE_MEM + 0x08);
	return __kernel_extwl(result, addr & 3);
	if (addr >= CIA_DENSE_MEM)
		base_and_type = CIA_SPARSE_MEM + 0x00;
	else
		base_and_type = CIA_IO + 0x00;
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE void cia_writeb(u8 b, volatile void __iomem *xaddr)
__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w;
	unsigned long result, base_and_type;

	addr &= CIA_MEM_R1_MASK;
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + CIA_SPARSE_MEM + 0x00) = w;
	if (addr >= CIA_DENSE_MEM)
		base_and_type = CIA_SPARSE_MEM + 0x08;
	else
		base_and_type = CIA_IO + 0x08;
	result = *(vip)((addr << 5) + base_and_type);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void cia_writew(u16 b, volatile void __iomem *xaddr)
__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w;
	unsigned long w, base_and_type;

	addr &= CIA_MEM_R1_MASK;
	if (addr >= CIA_DENSE_MEM)
		base_and_type = CIA_SPARSE_MEM + 0x08;
	else
		base_and_type = CIA_IO + 0x08;
	w = __kernel_inswl(b, addr & 3);
	*(vuip)((addr << 5) + CIA_SPARSE_MEM + 0x08) = w;
	*(vuip)((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE u32 cia_readl(const volatile void __iomem *addr)
__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < CIA_DENSE_MEM)
		addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
	return *(vuip)addr;
}

__EXTERN_INLINE u64 cia_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void cia_writel(u32 b, volatile void __iomem *addr)
__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < CIA_DENSE_MEM)
		addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
	*(vuip)addr = b;
}

__EXTERN_INLINE void cia_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)
{
	*(vulp)addr = b;
	return (void __iomem *)(addr + CIA_IO);
}

__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr,
					  unsigned long size __attribute__((unused)))
					  unsigned long size)
{
	return (void __iomem *)(addr + CIA_DENSE_MEM);
}

__EXTERN_INLINE void cia_iounmap(volatile void __iomem *addr)
{
	return;
}

__EXTERN_INLINE u8 cia_bwx_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 cia_bwx_readw(const volatile void __iomem *addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 cia_bwx_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 cia_bwx_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void cia_bwx_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void cia_bwx_writew(u16 b, volatile void __iomem *addr)
__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
{
	__kernel_stw(b, *(vusp)addr);
	return addr >= IDENT_ADDR + 0x8000000000UL;
}

__EXTERN_INLINE void cia_bwx_writel(u32 b, volatile void __iomem *addr)
__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr)
{
	*(vuip)addr = b;
	return (unsigned long)addr >= CIA_DENSE_MEM;
}

__EXTERN_INLINE void cia_bwx_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr)
{
	*(vulp)addr = b;
	return (void __iomem *)(addr + CIA_BW_IO);
}

__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr,
...
...
@@ -540,81 +452,44 @@ __EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr,
	return (void __iomem *)(addr + CIA_BW_MEM);
}

__EXTERN_INLINE void cia_bwx_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr)
{
	return;
	return addr >= IDENT_ADDR + 0x8000000000UL;
}

__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem *addr)
{
	return addr >= IDENT_ADDR + 0x8000000000UL;
	return (unsigned long)addr < CIA_BW_IO;
}
#undef vucp
#undef vusp
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#undef __IO_PREFIX
#define __IO_PREFIX cia
#define cia_trivial_rw_bw 2
#define cia_trivial_rw_lq 1
#define cia_trivial_io_bw 0
#define cia_trivial_io_lq 0
#define cia_trivial_iounmap 1
#include <asm/io_trivial.h>
#undef __IO_PREFIX
#define __IO_PREFIX cia_bwx
#define cia_bwx_trivial_rw_bw 1
#define cia_bwx_trivial_rw_lq 1
#define cia_bwx_trivial_io_bw 1
#define cia_bwx_trivial_io_lq 1
#define cia_bwx_trivial_iounmap 1
#include <asm/io_trivial.h>
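As a sketch only: the new cia_ioread8/cia_iowrite8 above fold port I/O and sparse MMIO into a single routine by choosing base_and_type from the cooked address. A userspace analogue of that selection, with invented window constants standing in for CIA_IO, CIA_SPARSE_MEM and CIA_DENSE_MEM, looks like this.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the CIA window bases. */
#define DEMO_IO         0x8580000000UL
#define DEMO_SPARSE_MEM 0x8000000000UL
#define DEMO_DENSE_MEM  0x8600000000UL

/* Pick the sparse window the way cia_ioread8 picks base_and_type:
   addresses at or above the dense-memory cutoff are MMIO and go to
   the sparse-memory window; everything below is treated as a port. */
static uint64_t pick_base(uint64_t addr)
{
	return (addr >= DEMO_DENSE_MEM) ? DEMO_SPARSE_MEM + 0x00
					: DEMO_IO + 0x00;
}

int main(void)
{
	printf("port 0x70   -> base %#lx\n", (unsigned long)pick_base(0x70));
	printf("mmio cookie -> base %#lx\n",
	       (unsigned long)pick_base(DEMO_DENSE_MEM + 0x1000));
	return 0;
}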
#undef __IO_PREFIX
#ifdef CONFIG_ALPHA_PYXIS
# define __inb(p) cia_bwx_inb((unsigned long)(p))
# define __inw(p) cia_bwx_inw((unsigned long)(p))
# define __inl(p) cia_bwx_inl((unsigned long)(p))
# define __outb(x,p) cia_bwx_outb(x,(unsigned long)(p))
# define __outw(x,p) cia_bwx_outw(x,(unsigned long)(p))
# define __outl(x,p) cia_bwx_outl(x,(unsigned long)(p))
# define __readb(a) cia_bwx_readb(a)
# define __readw(a) cia_bwx_readw(a)
# define __readl(a) cia_bwx_readl(a)
# define __readq(a) cia_bwx_readq(a)
# define __writeb(x,a) cia_bwx_writeb(x,a)
# define __writew(x,a) cia_bwx_writew(x,a)
# define __writel(x,a) cia_bwx_writel(x,a)
# define __writeq(x,a) cia_bwx_writeq(x,a)
# define __ioremap(a,s) cia_bwx_ioremap(a,s)
# define __iounmap(a) cia_bwx_iounmap(a)
# define inb(p) __inb(p)
# define inw(p) __inw(p)
# define inl(p) __inl(p)
# define outb(x,p) __outb(x,p)
# define outw(x,p) __outw(x,p)
# define outl(x,p) __outl(x,p)
# define __raw_readb(a) __readb(a)
# define __raw_readw(a) __readw(a)
# define __raw_readl(a) __readl(a)
# define __raw_readq(a) __readq(a)
# define __raw_writeb(x,a) __writeb(x,a)
# define __raw_writew(x,a) __writew(x,a)
# define __raw_writel(x,a) __writel(x,a)
# define __raw_writeq(x,a) __writeq(x,a)
#define __IO_PREFIX cia_bwx
#else
# define __inb(p) cia_inb((unsigned long)(p))
# define __inw(p) cia_inw((unsigned long)(p))
# define __inl(p) cia_inl((unsigned long)(p))
# define __outb(x,p) cia_outb(x,(unsigned long)(p))
# define __outw(x,p) cia_outw(x,(unsigned long)(p))
# define __outl(x,p) cia_outl(x,(unsigned long)(p))
# define __readb(a) cia_readb(a)
# define __readw(a) cia_readw(a)
# define __readl(a) cia_readl(a)
# define __readq(a) cia_readq(a)
# define __writeb(x,a) cia_writeb(x,a)
# define __writew(x,a) cia_writew(x,a)
# define __writel(x,a) cia_writel(x,a)
# define __writeq(x,a) cia_writeq(x,a)
# define __ioremap(a,s) cia_ioremap(a,s)
# define __iounmap(a) cia_iounmap(a)
# define __raw_readl(a) __readl(a)
# define __raw_readq(a) __readq(a)
# define __raw_writel(v,a) __writel(v,a)
# define __raw_writeq(v,a) __writeq(v,a)
#endif
/* PYXIS */
#define __is_ioaddr(a) cia_is_ioaddr((unsigned long)(a))
#endif
/* __WANT_IO_DEF */
#define __IO_PREFIX cia
#endif
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_irongate.h
...
...
@@ -190,86 +190,13 @@ struct el_IRONGATE_sysdata_mcheck {
* K7 can only use linear accesses to get at PCI memory and I/O spaces.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 irongate_inb(unsigned long addr)
{
	return __kernel_ldbu(*(vucp)(addr + IRONGATE_IO));
}

__EXTERN_INLINE void irongate_outb(u8 b, unsigned long addr)
{
	__kernel_stb(b, *(vucp)(addr + IRONGATE_IO));
	mb();
}

__EXTERN_INLINE u16 irongate_inw(unsigned long addr)
{
	return __kernel_ldwu(*(vusp)(addr + IRONGATE_IO));
}

__EXTERN_INLINE void irongate_outw(u16 b, unsigned long addr)
{
	__kernel_stw(b, *(vusp)(addr + IRONGATE_IO));
	mb();
}

__EXTERN_INLINE u32 irongate_inl(unsigned long addr)
{
	return *(vuip)(addr + IRONGATE_IO);
}

__EXTERN_INLINE void irongate_outl(u32 b, unsigned long addr)
{
	*(vuip)(addr + IRONGATE_IO) = b;
	mb();
}

/*
 * Memory functions.  All accesses are done through linear space.
 */
__EXTERN_INLINE u8 irongate_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 irongate_readw(const volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 irongate_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 irongate_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void irongate_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void irongate_writew(u16 b, volatile void __iomem *addr)
{
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void irongate_writel(u32 b, volatile void __iomem *addr)
{
	*(vuip)addr = b;
}

__EXTERN_INLINE void irongate_writeq(u64 b, volatile void __iomem *addr)
{
	*(vulp)addr = b;
	return (void __iomem *)(addr + IRONGATE_IO);
}

extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size);
...
...
@@ -280,47 +207,20 @@ __EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
	return addr >= IRONGATE_MEM;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) irongate_inb((unsigned long)(p))
#define __inw(p) irongate_inw((unsigned long)(p))
#define __inl(p) irongate_inl((unsigned long)(p))
#define __outb(x,p) irongate_outb(x,(unsigned long)(p))
#define __outw(x,p) irongate_outw(x,(unsigned long)(p))
#define __outl(x,p) irongate_outl(x,(unsigned long)(p))
#define __readb(a) irongate_readb(a)
#define __readw(a) irongate_readw(a)
#define __readl(a) irongate_readl(a)
#define __readq(a) irongate_readq(a)
#define __writeb(x,a) irongate_writeb(x,a)
#define __writew(x,a) irongate_writew(x,a)
#define __writel(x,a) irongate_writel(x,a)
#define __writeq(x,a) irongate_writeq(x,a)
#define __ioremap(a,s) irongate_ioremap(a,s)
#define __iounmap(a) irongate_iounmap(a)
#define __is_ioaddr(a) irongate_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb(x,p)
#define outw(x,p) __outw(x,p)
#define outl(x,p) __outl(x,p)
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr;
	return addr < IRONGATE_IO || addr >= IRONGATE_CONF;
}
#undef __IO_PREFIX
#define __IO_PREFIX irongate
#define irongate_trivial_rw_bw 1
#define irongate_trivial_rw_lq 1
#define irongate_trivial_io_bw 1
#define irongate_trivial_io_lq 1
#define irongate_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_lca.h
...
...
@@ -219,182 +219,137 @@ union el_lca {
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 lca_inb(unsigned long addr)
{
	long result = *(vip)((addr << 5) + LCA_IO + 0x00);
	return __kernel_extbl(result, addr & 3);
}
#define LCA_SET_HAE \
do { \
if (addr >= (1UL << 24)) { \
unsigned long msb = addr & 0xf8000000; \
addr -= msb; \
set_hae(msb); \
} \
} while (0)
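A minimal standalone sketch of what LCA_SET_HAE above does arithmetically: offsets at or past 16 MB are split into a high part (which the kernel programs into the HAE register via set_hae()) and a low part that still fits the sparse window. set_hae() is kernel-only, so the demo just prints the split.

#include <stdint.h>
#include <stdio.h>

/* Split an offset the way LCA_SET_HAE does: anything at or above
   1 << 24 has its 0xf8000000 bits peeled off into the HAE value. */
static void hae_split(uint64_t addr, uint64_t *hae, uint64_t *low)
{
	*hae = 0;
	if (addr >= (1UL << 24)) {
		*hae = addr & 0xf8000000;
		addr -= *hae;
	}
	*low = addr;
}

int main(void)
{
	uint64_t hae, low;
	hae_split(0x0a123456, &hae, &low);
	printf("HAE = %#lx, in-window offset = %#lx\n",
	       (unsigned long)hae, (unsigned long)low);
	return 0;
}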
__EXTERN_INLINE void lca_outb(u8 b, unsigned long addr)
{
	unsigned long w;
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + LCA_IO + 0x00) = w;
	mb();
}

__EXTERN_INLINE u16 lca_inw(unsigned long addr)
{
	long result = *(vip)((addr << 5) + LCA_IO + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void lca_outw(u16 b, unsigned long addr)
{
	unsigned long w;
	w = __kernel_inswl(b, addr & 3);
	*(vuip)((addr << 5) + LCA_IO + 0x08) = w;
	mb();
}

__EXTERN_INLINE u32 lca_inl(unsigned long addr)
{
	return *(vuip)((addr << 5) + LCA_IO + 0x18);
}

__EXTERN_INLINE void lca_outl(u32 b, unsigned long addr)
{
	*(vuip)((addr << 5) + LCA_IO + 0x18) = b;
	mb();
}

/*
 * Memory functions.  64-bit and 32-bit accesses are done through
 * dense memory space, everything else through sparse space.
 */
__EXTERN_INLINE u8 lca_readb(const volatile void __iomem *xaddr)
__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, msb;
	unsigned long result, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		if (addr >= (1UL << 24)) {
			msb = addr & 0xf8000000;
			addr -= msb;
			set_hae(msb);
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x00;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x00;
	}
	result = *(vip)((addr << 5) + LCA_SPARSE_MEM + 0x00);
	result = *(vip)((addr << 5) + base_and_type);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE u16 lca_readw(const volatile void __iomem *xaddr)
__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, msb;
	unsigned long w, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		if (addr >= (1UL << 24)) {
			msb = addr & 0xf8000000;
			addr -= msb;
			set_hae(msb);
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x00;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x00;
	}
	result = *(vip)((addr << 5) + LCA_SPARSE_MEM + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE u32 lca_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 lca_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE void lca_writeb(u8 b, volatile void __iomem *xaddr)
__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long msb;
	unsigned long w;
	unsigned long result, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		if (addr >= (1UL << 24)) {
			msb = addr & 0xf8000000;
			addr -= msb;
			set_hae(msb);
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x08;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x08;
	}
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + LCA_SPARSE_MEM + 0x00) = w;
	result = *(vip)((addr << 5) + base_and_type);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void lca_writew(u16 b, volatile void __iomem *xaddr)
__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long msb;
	unsigned long w;
	unsigned long w, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		if (addr >= (1UL << 24)) {
			msb = addr & 0xf8000000;
			addr -= msb;
			set_hae(msb);
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x08;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x08;
	}
	w = __kernel_inswl(b, addr & 3);
	*(vuip)((addr << 5) + LCA_SPARSE_MEM + 0x08) = w;
	*(vuip)((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < LCA_DENSE_MEM)
		addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
	return *(vuip)addr;
}

__EXTERN_INLINE void lca_writel(u32 b, volatile void __iomem *addr)
__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < LCA_DENSE_MEM)
		addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
	*(vuip)addr = b;
}

__EXTERN_INLINE void lca_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
{
	*(vulp)addr = b;
	return (void __iomem *)(addr + LCA_IO);
}

__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
					  unsigned long size __attribute__((unused)))
					  unsigned long size)
{
	return (void __iomem *)(addr + LCA_DENSE_MEM);
}

__EXTERN_INLINE void lca_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
{
	return;
	return addr >= IDENT_ADDR + 0x120000000UL;
}

__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
{
	return addr >= IDENT_ADDR + 0x120000000UL;
	return (unsigned long)addr >= LCA_DENSE_MEM;
}
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) lca_inb((unsigned long)(p))
#define __inw(p) lca_inw((unsigned long)(p))
#define __inl(p) lca_inl((unsigned long)(p))
#define __outb(x,p) lca_outb(x,(unsigned long)(p))
#define __outw(x,p) lca_outw(x,(unsigned long)(p))
#define __outl(x,p) lca_outl(x,(unsigned long)(p))
#define __readb(a) lca_readb(a)
#define __readw(a) lca_readw(a)
#define __readl(a) lca_readl(a)
#define __readq(a) lca_readq(a)
#define __writeb(x,a) lca_writeb(x,a)
#define __writew(x,a) lca_writew(x,a)
#define __writel(x,a) lca_writel(x,a)
#define __writeq(x,a) lca_writeq(x,a)
#define __ioremap(a,s) lca_ioremap(a,s)
#define __iounmap(a) lca_iounmap(a)
#define __is_ioaddr(a) lca_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX lca
#define lca_trivial_rw_bw 2
#define lca_trivial_rw_lq 1
#define lca_trivial_io_bw 0
#define lca_trivial_io_lq 0
#define lca_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_marvel.h
...
...
@@ -325,217 +325,48 @@ struct io7 {
* I/O functions. All access through linear space.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
#ifdef CONFIG_VGA_HOSE
extern struct pci_controller *pci_vga_hose;
# define __marvel_is_port_vga(a) \
(((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
# define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000))
# define FIXUP_IOADDR_VGA(a) do { \
if (pci_vga_hose && __marvel_is_port_vga(a)) \
a += pci_vga_hose->io_space->start; \
} while(0)
#else
# define FIXUP_IOADDR_VGA(a)
#endif
#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71))
#define FIXUP_IOADDR_LEGACY(a)
#define FIXUP_IOADDR(a) do { \
FIXUP_IOADDR_VGA(a); \
FIXUP_IOADDR_LEGACY(a); \
} while(0)
#if 0
# define IOBUG(x) printk x
# define IOBUG_FILTER_IOADDR(a, x) \
if (!__marvel_is_port_kbd(a) && !__marvel_is_port_rtc(a)) IOBUG(x)
#else
# define IOBUG(x)
# define IOBUG_FILTER_IOADDR(a, x)
#endif
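For illustration only, here is the kind of legacy-port range test the __marvel_is_port_vga and __marvel_is_port_kbd macros above perform, as a small standalone program; the ranges are copied from the macros, everything else is demo scaffolding.

#include <stdio.h>

/* Same ranges as the __marvel_is_port_* macros above. */
static int is_port_vga(unsigned long a)
{
	return a >= 0x3b0 && a < 0x3e0 && a != 0x3b3 && a != 0x3d3;
}

static int is_port_kbd(unsigned long a)
{
	return a == 0x60 || a == 0x64;
}

int main(void)
{
	printf("0x3c0 vga? %d  0x60 kbd? %d  0x3b3 vga? %d\n",
	       is_port_vga(0x3c0), is_port_kbd(0x60), is_port_vga(0x3b3));
	return 0;
}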
extern u8 __marvel_rtc_io(int write, u8 b, unsigned long addr);
#define __marvel_rtc_inb(a) __marvel_rtc_io(0, 0, (a))
#define __marvel_rtc_outb(b, a) __marvel_rtc_io(1, (b), (a))
__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr)
{
	return (addr & (1UL << 40)) != 0; /*FIXME - hardwire*/
}

__EXTERN_INLINE u8 marvel_inb(unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		if (__marvel_is_port_kbd(addr))
			return (u8)0;
		if (__marvel_is_port_rtc(addr))
			return __marvel_rtc_inb(addr);
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return (u8)-1;
	}
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE void marvel_outb(u8 b, unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		if (__marvel_is_port_rtc(addr))
			return (void)__marvel_rtc_outb(b, addr);
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return;
	}
	__kernel_stb(b, *(vucp)addr);
	mb();
}

__EXTERN_INLINE u16 marvel_inw(unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return (u16)-1;
	}
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE void marvel_outw(u16 w, unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return;
	}
	__kernel_stw(w, *(vusp)addr);
	mb();
}

__EXTERN_INLINE u32 marvel_inl(unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return (u32)-1;
	}
	return *(vuip)addr;
}

__EXTERN_INLINE void marvel_outl(u32 l, unsigned long addr)
{
	FIXUP_IOADDR(addr);
	if (!marvel_is_ioaddr(addr)) {
		IOBUG_FILTER_IOADDR(addr, ("Bad IO addr %lx - reading -1\n", addr));
		return;
	}
	*(vuip)addr = l;
	mb();
}
/*
* Memory functions. All accesses through linear space.
*/
extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size);
extern void marvel_iounmap(volatile void __iomem *addr);
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
__EXTERN_INLINE u8 marvel_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

extern unsigned int marvel_ioread8(void __iomem *);
extern void marvel_iowrite8(u8 b, void __iomem *);

__EXTERN_INLINE u16 marvel_readw(const volatile void __iomem *addr)
__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 marvel_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 marvel_readq(const volatile void __iomem *addr)
__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr)
{
	return *(vulp)addr;
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void marvel_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void marvel_writew(u16 w, volatile void __iomem *addr)
{
	__kernel_stw(w, *(vusp)addr);
}

__EXTERN_INLINE void marvel_writel(u32 l, volatile void __iomem *addr)
{
	*(vuip)addr = l;
}

extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size);
extern void marvel_iounmap(volatile void __iomem *addr);
extern void __iomem *marvel_ioportmap(unsigned long addr);

__EXTERN_INLINE void marvel_writeq(u64 q, volatile void __iomem *addr)
__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr)
{
	*(vulp)addr = q;
	return (addr >> 40) & 1;
}
#undef FIXUP_IOADDR
#undef FIXUP_IOADDR_LEGACY
#undef FIXUP_IOADDR_VGA
extern int marvel_is_mmio(const volatile void __iomem *);
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) marvel_inb((unsigned long)(p))
#define __inw(p) marvel_inw((unsigned long)(p))
#define __inl(p) marvel_inl((unsigned long)(p))
#define __outb(x,p) marvel_outb(x,(unsigned long)(p))
#define __outw(x,p) marvel_outw(x,(unsigned long)(p))
#define __outl(x,p) marvel_outl(x,(unsigned long)(p))
#define __readb(a) marvel_readb(a)
#define __readw(a) marvel_readw(a)
#define __readl(a) marvel_readl(a)
#define __readq(a) marvel_readq(a)
#define __writeb(x,a) marvel_writeb(x,a)
#define __writew(x,a) marvel_writew(x,a)
#define __writel(x,a) marvel_writel(x,a)
#define __writeq(x,a) marvel_writeq(x,a)
#define __ioremap(a,s) marvel_ioremap(a,s)
#define __iounmap(a) marvel_iounmap(a)
#define __is_ioaddr(a) marvel_is_ioaddr((unsigned long)(a))
/* Disable direct inlining of these calls with the debug checks present. */
#if 0
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX marvel
#define marvel_trivial_rw_bw 1
#define marvel_trivial_rw_lq 1
#define marvel_trivial_io_bw 0
#define marvel_trivial_io_lq 1
#define marvel_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
# undef __EXTERN_INLINE
...
...
include/asm-alpha/core_mcpcia.h
...
...
@@ -211,91 +211,6 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
* Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 mcpcia_inb(unsigned long in_addr)
{
	unsigned long addr, hose, result;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;

	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   correct hose's I/O region, but that doesn't take care of
	   legacy ISA crap.  */
	hose += MCPCIA_IO_BIAS;

	result = *(vip)((addr << 5) + hose + 0x00);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE void mcpcia_outb(u8 b, unsigned long in_addr)
{
	unsigned long addr, hose, w;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;
	hose += MCPCIA_IO_BIAS;

	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + hose + 0x00) = w;
	mb();
}

__EXTERN_INLINE u16 mcpcia_inw(unsigned long in_addr)
{
	unsigned long addr, hose, result;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;
	hose += MCPCIA_IO_BIAS;

	result = *(vip)((addr << 5) + hose + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void mcpcia_outw(u16 b, unsigned long in_addr)
{
	unsigned long addr, hose, w;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;
	hose += MCPCIA_IO_BIAS;

	w = __kernel_inswl(b, addr & 3);
	*(vuip)((addr << 5) + hose + 0x08) = w;
	mb();
}

__EXTERN_INLINE u32 mcpcia_inl(unsigned long in_addr)
{
	unsigned long addr, hose;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;
	hose += MCPCIA_IO_BIAS;

	return *(vuip)((addr << 5) + hose + 0x18);
}

__EXTERN_INLINE void mcpcia_outl(u32 b, unsigned long in_addr)
{
	unsigned long addr, hose;

	addr = in_addr & 0xffffUL;
	hose = in_addr & ~0xffffUL;
	hose += MCPCIA_IO_BIAS;

	*(vuip)((addr << 5) + hose + 0x18) = b;
	mb();
}
/*
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
...
...
@@ -328,149 +243,131 @@ __EXTERN_INLINE void mcpcia_outl(u32 b, unsigned long in_addr)
*
*/
__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr,
					     unsigned long size __attribute__((unused)))
{
	return (void __iomem *)(addr + MCPCIA_MEM_BIAS);
}
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
__EXTERN_INLINE void mcpcia_iounmap(volatile void __iomem *addr)
{
	return;
}
#ifdef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
set_hae(hose & 0xffffffff); \
hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \
}
#else
#define MCPCIA_FROB_MMIO \
if (__mcpcia_is_mmio(hose)) { \
hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \
}
#endif
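Sketch, not kernel code: the MCPCIA accessors below repeatedly split an __iomem cookie into per-hose bits and an in-hose offset, then rebase dense space onto sparse space as MCPCIA_FROB_MMIO does. The mask value here is a placeholder standing in for MCPCIA_MEM_MASK.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MEM_MASK 0xffffffffUL	/* stand-in for MCPCIA_MEM_MASK */

/* Split a cookie into hose bits and the in-hose offset, the way the
   mcpcia_ioread* routines below do before forming a sparse address. */
static void split_cookie(uint64_t cookie, uint64_t *hose, uint64_t *offset)
{
	*offset = cookie & DEMO_MEM_MASK;
	*hose   = cookie & ~DEMO_MEM_MASK;
}

int main(void)
{
	uint64_t hose, off;
	split_cookie(0x9400000012345678UL, &hose, &off);
	printf("hose %#lx offset %#lx\n", (unsigned long)hose, (unsigned long)off);
	return 0;
}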
__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr)
static inline int __mcpcia_is_mmio(unsigned long addr)
{
	return addr >= MCPCIA_SPARSE(0);
	return (addr & 0x80000000UL) == 0;
}

__EXTERN_INLINE u8 mcpcia_readb(const volatile void __iomem *xaddr)
__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & 0xffffffffUL;
	unsigned long hose = (unsigned long)xaddr & ~0xffffffffUL;
	unsigned long result, work;
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long result;
#ifndef MCPCIA_ONE_HAE_WINDOW
	unsigned long msb;
	msb = addr & ~MCPCIA_MEM_MASK;
	set_hae(msb);
#endif
	addr = addr & MCPCIA_MEM_MASK;
	MCPCIA_FROB_MMIO;
	hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);
	work = ((addr << 5) + hose + 0x00);
	result = *(vip)work;
	result = *(vip)((addr << 5) + hose + 0x00);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE u16 mcpcia_readw(const volatile void __iomem *xaddr)
__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & 0xffffffffUL;
	unsigned long hose = (unsigned long)xaddr & ~0xffffffffUL;
	unsigned long result, work;
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long w;
#ifndef MCPCIA_ONE_HAE_WINDOW
	unsigned long msb;
	msb = addr & ~MCPCIA_MEM_MASK;
	set_hae(msb);
#endif
	addr = addr & MCPCIA_MEM_MASK;
	MCPCIA_FROB_MMIO;
	hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);
	work = ((addr << 5) + hose + 0x08);
	result = *(vip)work;
	return __kernel_extwl(result, addr & 3);
	w = __kernel_insbl(b, addr & 3);
	*(vuip)((addr << 5) + hose + 0x00) = w;
}

__EXTERN_INLINE void mcpcia_writeb(u8 b, volatile void __iomem *xaddr)
__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & 0xffffffffUL;
	unsigned long hose = (unsigned long)xaddr & ~0xffffffffUL;
	unsigned long w;
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long result;
#ifndef MCPCIA_ONE_HAE_WINDOW
	unsigned long msb;
	msb = addr & ~MCPCIA_MEM_MASK;
	set_hae(msb);
#endif
	addr = addr & MCPCIA_MEM_MASK;
	MCPCIA_FROB_MMIO;
	w = __kernel_insbl(b, addr & 3);
	hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);
	*(vuip)((addr << 5) + hose + 0x00) = w;
	result = *(vip)((addr << 5) + hose + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void mcpcia_writew(u16 b, volatile void __iomem *xaddr)
__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & 0xffffffffUL;
	unsigned long hose = (unsigned long)xaddr & ~0xffffffffUL;
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long w;
#ifndef MCPCIA_ONE_HAE_WINDOW
	unsigned long msb;
	msb = addr & ~MCPCIA_MEM_MASK;
	set_hae(msb);
#endif
	addr = addr & MCPCIA_MEM_MASK;
	MCPCIA_FROB_MMIO;
	w = __kernel_inswl(b, addr & 3);
	hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);
	*(vuip)((addr << 5) + hose + 0x08) = w;
}

__EXTERN_INLINE u32 mcpcia_readl(const volatile void __iomem *addr)
__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr;

	if (!__mcpcia_is_mmio(addr))
		addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;

	return *(vuip)addr;
}

__EXTERN_INLINE u64 mcpcia_readq(const volatile void __iomem *addr)
__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr)
{
	return *(vulp)addr;
	unsigned long addr = (unsigned long)xaddr;

	if (!__mcpcia_is_mmio(addr))
		addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;

	*(vuip)addr = b;
}

__EXTERN_INLINE void mcpcia_writel(u32 b, volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr)
{
	*(vuip)addr = b;
	return (void __iomem *)(addr + MCPCIA_IO_BIAS);
}

__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr,
					     unsigned long size)
{
	return (void __iomem *)(addr + MCPCIA_MEM_BIAS);
}

__EXTERN_INLINE void mcpcia_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr)
{
	*(vulp)addr = b;
	return addr >= MCPCIA_SPARSE(0);
}
#undef vucp
#undef vusp
__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr;
	return __mcpcia_is_mmio(addr);
}
#undef MCPCIA_FROB_MMIO
#undef vip
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) mcpcia_inb((unsigned long)(p))
#define __inw(p) mcpcia_inw((unsigned long)(p))
#define __inl(p) mcpcia_inl((unsigned long)(p))
#define __outb(x,p) mcpcia_outb(x,(unsigned long)(p))
#define __outw(x,p) mcpcia_outw(x,(unsigned long)(p))
#define __outl(x,p) mcpcia_outl(x,(unsigned long)(p))
#define __readb(a) mcpcia_readb(a)
#define __readw(a) mcpcia_readw(a)
#define __readl(a) mcpcia_readl(a)
#define __readq(a) mcpcia_readq(a)
#define __writeb(x,a) mcpcia_writeb(x,a)
#define __writew(x,a) mcpcia_writew(x,a)
#define __writel(x,a) mcpcia_writel(x,a)
#define __writeq(x,a) mcpcia_writeq(x,a)
#define __ioremap(a,s) mcpcia_ioremap(a,s)
#define __iounmap(a) mcpcia_iounmap(a)
#define __is_ioaddr(a) mcpcia_is_ioaddr((unsigned long)(a))
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX mcpcia
#define mcpcia_trivial_rw_bw 2
#define mcpcia_trivial_rw_lq 1
#define mcpcia_trivial_io_bw 0
#define mcpcia_trivial_io_lq 0
#define mcpcia_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_polaris.h
...
...
@@ -63,49 +63,6 @@ struct el_POLARIS_sysdata_mcheck {
* However, we will support only the BWX form.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 polaris_inb(unsigned long addr)
{
	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   POLARIS I/O region, but that doesn't take care of legacy
	   ISA crap.  */
	return __kernel_ldbu(*(vucp)(addr + POLARIS_DENSE_IO_BASE));
}

__EXTERN_INLINE void polaris_outb(u8 b, unsigned long addr)
{
	__kernel_stb(b, *(vucp)(addr + POLARIS_DENSE_IO_BASE));
	mb();
}

__EXTERN_INLINE u16 polaris_inw(unsigned long addr)
{
	return __kernel_ldwu(*(vusp)(addr + POLARIS_DENSE_IO_BASE));
}

__EXTERN_INLINE void polaris_outw(u16 b, unsigned long addr)
{
	__kernel_stw(b, *(vusp)(addr + POLARIS_DENSE_IO_BASE));
	mb();
}

__EXTERN_INLINE u32 polaris_inl(unsigned long addr)
{
	return *(vuip)(addr + POLARIS_DENSE_IO_BASE);
}

__EXTERN_INLINE void polaris_outl(u32 b, unsigned long addr)
{
	*(vuip)(addr + POLARIS_DENSE_IO_BASE) = b;
	mb();
}
/*
* Memory functions. Polaris allows all accesses (byte/word
* as well as long/quad) to be done through dense space.
...
...
@@ -113,104 +70,35 @@ __EXTERN_INLINE void polaris_outl(u32 b, unsigned long addr)
* We will only support DENSE access via BWX insns.
*/
__EXTERN_INLINE u8 polaris_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 polaris_readw(const volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 polaris_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 polaris_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void polaris_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void polaris_writew(u16 b, volatile void __iomem *addr)
{
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void polaris_writel(u32 b, volatile void __iomem *addr)
{
	*(vuip)addr = b;
}

__EXTERN_INLINE void polaris_writeq(u64 b, volatile void __iomem *addr)
{
	*(vulp)addr = b;
	return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE);
}

__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr,
					      unsigned long size __attribute__((unused)))
					      unsigned long size)
{
	return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE);
}

__EXTERN_INLINE void polaris_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
{
	return;
	return addr >= POLARIS_SPARSE_MEM_BASE;
}

__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr)
{
	return addr >= POLARIS_SPARSE_MEM_BASE;
	return (unsigned long)addr < POLARIS_SPARSE_IO_BASE;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) polaris_inb((unsigned long)(p))
#define __inw(p) polaris_inw((unsigned long)(p))
#define __inl(p) polaris_inl((unsigned long)(p))
#define __outb(x,p) polaris_outb(x,(unsigned long)(p))
#define __outw(x,p) polaris_outw(x,(unsigned long)(p))
#define __outl(x,p) polaris_outl(x,(unsigned long)(p))
#define __readb(a) polaris_readb(a)
#define __readw(a) polaris_readw(a)
#define __readl(a) polaris_readl(a)
#define __readq(a) polaris_readq(a)
#define __writeb(x,a) polaris_writeb(x,a)
#define __writew(x,a) polaris_writew(x,a)
#define __writel(x,a) polaris_writel(x,a)
#define __writeq(x,a) polaris_writeq(x,a)
#define __ioremap(a,s) polaris_ioremap(a,s)
#define __iounmap(a) polaris_iounmap(a)
#define __is_ioaddr(a) polaris_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb(x,p)
#define outw(x,p) __outw(x,p)
#define outl(x,p) __outl(x,p)
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX polaris
#define polaris_trivial_rw_bw 1
#define polaris_trivial_rw_lq 1
#define polaris_trivial_io_bw 1
#define polaris_trivial_io_lq 1
#define polaris_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_t2.h
...
...
@@ -357,13 +357,13 @@ struct el_t2_frame_corrected {
#define vip volatile int *
#define vuip volatile unsigned int *
__EXTERN_INLINE u8 t2_inb(unsigned long addr)
static inline u8 t2_inb(unsigned long addr)
{
	long result = *(vip)((addr << 5) + T2_IO + 0x00);
	return __kernel_extbl(result, addr & 3);
}

__EXTERN_INLINE void t2_outb(u8 b, unsigned long addr)
static inline void t2_outb(u8 b, unsigned long addr)
{
	unsigned long w;
...
...
@@ -372,13 +372,13 @@ __EXTERN_INLINE void t2_outb(u8 b, unsigned long addr)
	mb();
}

__EXTERN_INLINE u16 t2_inw(unsigned long addr)
static inline u16 t2_inw(unsigned long addr)
{
	long result = *(vip)((addr << 5) + T2_IO + 0x08);
	return __kernel_extwl(result, addr & 3);
}

__EXTERN_INLINE void t2_outw(u16 b, unsigned long addr)
static inline void t2_outw(u16 b, unsigned long addr)
{
	unsigned long w;
...
...
@@ -387,12 +387,12 @@ __EXTERN_INLINE void t2_outw(u16 b, unsigned long addr)
	mb();
}

__EXTERN_INLINE u32 t2_inl(unsigned long addr)
static inline u32 t2_inl(unsigned long addr)
{
	return *(vuip)((addr << 5) + T2_IO + 0x18);
}

__EXTERN_INLINE void t2_outl(u32 b, unsigned long addr)
static inline void t2_outl(u32 b, unsigned long addr)
{
	*(vuip)((addr << 5) + T2_IO + 0x18) = b;
	mb();
...
...
@@ -560,16 +560,15 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
	spin_unlock_irqrestore(&t2_hae_lock, flags);
}

__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr,
					 unsigned long size __attribute__((unused)))
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
{
	return (void __iomem *)addr;
	return (void __iomem *)(addr + T2_IO);
}

__EXTERN_INLINE void t2_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr,
					 unsigned long size)
{
	return;
	return (void __iomem *)(addr + T2_DENSE_MEM);
}

__EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
...
...
@@ -577,30 +576,47 @@ __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
	return (long)addr >= 0;
}

__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= T2_DENSE_MEM;
}
/* New-style ioread interface. The mmio routines are so ugly for T2 that
it doesn't make sense to merge the pio and mmio routines. */
#define IOPORT(OS, NS) \
__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \
{ \
if (t2_is_mmio(xaddr)) \
return t2_read##OS(xaddr - T2_DENSE_MEM); \
else \
return t2_in##OS((unsigned long)xaddr - T2_IO); \
} \
__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \
{ \
if (t2_is_mmio(xaddr)) \
t2_write##OS(b, xaddr - T2_DENSE_MEM); \
else \
t2_out##OS(b, (unsigned long)xaddr - T2_IO); \
}
IOPORT(b, 8)
IOPORT(w, 16)
IOPORT(l, 32)
#undef IOPORT
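For readability, this is roughly what IOPORT(b, 8) above expands to, hand-expanded here as a sketch from the macro text shown above (the real functions come from the preprocessor, not from source written this way):

__EXTERN_INLINE unsigned int t2_ioread8(void __iomem *xaddr)
{
	if (t2_is_mmio(xaddr))
		return t2_readb(xaddr - T2_DENSE_MEM);
	else
		return t2_inb((unsigned long)xaddr - T2_IO);
}

__EXTERN_INLINE void t2_iowrite8(u8 b, void __iomem *xaddr)
{
	if (t2_is_mmio(xaddr))
		t2_writeb(b, xaddr - T2_DENSE_MEM);
	else
		t2_outb(b, (unsigned long)xaddr - T2_IO);
}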
#undef vip
#undef vuip
#ifdef __WANT_IO_DEF
#define __inb(p) t2_inb((unsigned long)(p))
#define __inw(p) t2_inw((unsigned long)(p))
#define __inl(p) t2_inl((unsigned long)(p))
#define __outb(x,p) t2_outb(x,(unsigned long)(p))
#define __outw(x,p) t2_outw(x,(unsigned long)(p))
#define __outl(x,p) t2_outl(x,(unsigned long)(p))
#define __readb(a) t2_readb(a)
#define __readw(a) t2_readw(a)
#define __readl(a) t2_readl(a)
#define __readq(a) t2_readq(a)
#define __writeb(x,a) t2_writeb(x,a)
#define __writew(x,a) t2_writew(x,a)
#define __writel(x,a) t2_writel(x,a)
#define __writeq(x,a) t2_writeq(x,a)
#define __ioremap(a,s) t2_ioremap(a,s)
#define __iounmap(a) t2_iounmap(a)
#define __is_ioaddr(a) t2_is_ioaddr((unsigned long)(a))
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX t2
#define t2_trivial_rw_bw 0
#define t2_trivial_rw_lq 0
#define t2_trivial_io_bw 0
#define t2_trivial_io_lq 0
#define t2_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_titan.h
...
...
@@ -377,59 +377,15 @@ struct el_PRIVATEER_envdata_mcheck {
* can only use linear accesses to get at PCI/AGP memory and I/O spaces.
*/
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE u8 titan_inb(unsigned long addr)
{
	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   correct hose's I/O region, but that doesn't take care of
	   legacy ISA crap.  */
	addr += TITAN_IO_BIAS;
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE void titan_outb(u8 b, unsigned long addr)
{
	addr += TITAN_IO_BIAS;
	__kernel_stb(b, *(vucp)addr);
	mb();
}

__EXTERN_INLINE u16 titan_inw(unsigned long addr)
{
	addr += TITAN_IO_BIAS;
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE void titan_outw(u16 b, unsigned long addr)
{
	addr += TITAN_IO_BIAS;
	__kernel_stw(b, *(vusp)addr);
	mb();
}

__EXTERN_INLINE u32 titan_inl(unsigned long addr)
{
	addr += TITAN_IO_BIAS;
	return *(vuip)addr;
}

__EXTERN_INLINE void titan_outl(u32 b, unsigned long addr)
{
	addr += TITAN_IO_BIAS;
	*(vuip)addr = b;
	mb();
}

/*
 * Memory functions.  all accesses are done through linear space.
 */
__EXTERN_INLINE void __iomem *titan_ioportmap(unsigned long addr)
{
	return (void __iomem *)(addr + TITAN_IO_BIAS);
}

extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size);
extern void titan_iounmap(volatile void __iomem *addr);
...
...
@@ -438,88 +394,16 @@ __EXTERN_INLINE int titan_is_ioaddr(unsigned long addr)
	return addr >= TITAN_BASE;
}

__EXTERN_INLINE u8 titan_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 titan_readw(const volatile void __iomem *addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 titan_readl(const volatile void __iomem *addr)
{
	return (*(vuip)addr) & 0xffffffff;
}

__EXTERN_INLINE u64 titan_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void titan_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void titan_writew(u16 b, volatile void __iomem *addr)
{
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void titan_writel(u32 b, volatile void __iomem *addr)
{
	*(vuip)addr = b;
}

__EXTERN_INLINE void titan_writeq(u64 b, volatile void __iomem *addr)
{
	*(vulp)addr = b;
}

extern int titan_is_mmio(const volatile void __iomem *addr);
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) titan_inb((unsigned long)(p))
#define __inw(p) titan_inw((unsigned long)(p))
#define __inl(p) titan_inl((unsigned long)(p))
#define __outb(x,p) titan_outb(x,(unsigned long)(p))
#define __outw(x,p) titan_outw(x,(unsigned long)(p))
#define __outl(x,p) titan_outl(x,(unsigned long)(p))
#define __readb(a) titan_readb(a)
#define __readw(a) titan_readw(a)
#define __readl(a) titan_readl(a)
#define __readq(a) titan_readq(a)
#define __writeb(x,a) titan_writeb(x,a)
#define __writew(x,a) titan_writew(x,a)
#define __writel(x,a) titan_writel(x,a)
#define __writeq(x,a) titan_writeq(x,a)
#define __ioremap(a,s) titan_ioremap(a,s)
#define __iounmap(a) titan_iounmap(a)
#define __is_ioaddr(a) titan_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(v,p) __outb(v,p)
#define outw(v,p) __outw(v,p)
#define outl(v,p) __outl(v,p)
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX titan
#define titan_trivial_rw_bw 1
#define titan_trivial_rw_lq 1
#define titan_trivial_io_bw 1
#define titan_trivial_io_lq 1
#define titan_trivial_iounmap 0
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_tsunami.h
...
...
@@ -299,69 +299,19 @@ struct el_TSUNAMI_sysdata_mcheck {
* can only use linear accesses to get at PCI memory and I/O spaces.
*/
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
__EXTERN_INLINE u8 tsunami_inb(unsigned long addr)
{
	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   correct hose's I/O region, but that doesn't take care of
	   legacy ISA crap.  */
	addr += TSUNAMI_IO_BIAS;
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE void tsunami_outb(u8 b, unsigned long addr)
{
	addr += TSUNAMI_IO_BIAS;
	__kernel_stb(b, *(vucp)addr);
	mb();
}

__EXTERN_INLINE u16 tsunami_inw(unsigned long addr)
{
	addr += TSUNAMI_IO_BIAS;
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE void tsunami_outw(u16 b, unsigned long addr)
{
	addr += TSUNAMI_IO_BIAS;
	__kernel_stw(b, *(vusp)addr);
	mb();
}

__EXTERN_INLINE u32 tsunami_inl(unsigned long addr)
{
	addr += TSUNAMI_IO_BIAS;
	return *(vuip)addr;
}

__EXTERN_INLINE void tsunami_outl(u32 b, unsigned long addr)
{
	addr += TSUNAMI_IO_BIAS;
	*(vuip)addr = b;
	mb();
}
/*
* Memory functions. all accesses are done through linear space.
*/
__EXTERN_INLINE void __iomem *tsunami_ioremap(unsigned long addr,
					      unsigned long size __attribute__((unused)))
__EXTERN_INLINE void __iomem *tsunami_ioportmap(unsigned long addr)
{
	return (void __iomem *)(addr + TSUNAMI_MEM_BIAS);
	return (void __iomem *)(addr + TSUNAMI_IO_BIAS);
}

__EXTERN_INLINE void tsunami_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *tsunami_ioremap(unsigned long addr,
					      unsigned long size)
{
	return;
	return (void __iomem *)(addr + TSUNAMI_MEM_BIAS);
}

__EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr)
...
...
@@ -369,87 +319,20 @@ __EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr)
	return addr >= TSUNAMI_BASE;
}

__EXTERN_INLINE u8 tsunami_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 tsunami_readw(const volatile void __iomem *addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 tsunami_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 tsunami_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void tsunami_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void tsunami_writew(u16 b, volatile void __iomem *addr)
{
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void tsunami_writel(u32 b, volatile void __iomem *addr)
{
	*(vuip)addr = b;
}

__EXTERN_INLINE void tsunami_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr)
{
	*(vulp)addr = b;
	unsigned long addr = (unsigned long)xaddr;
	return (addr & 0x100000000UL) == 0;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) tsunami_inb((unsigned long)(p))
#define __inw(p) tsunami_inw((unsigned long)(p))
#define __inl(p) tsunami_inl((unsigned long)(p))
#define __outb(x,p) tsunami_outb(x,(unsigned long)(p))
#define __outw(x,p) tsunami_outw(x,(unsigned long)(p))
#define __outl(x,p) tsunami_outl(x,(unsigned long)(p))
#define __readb(a) tsunami_readb(a)
#define __readw(a) tsunami_readw(a)
#define __readl(a) tsunami_readl(a)
#define __readq(a) tsunami_readq(a)
#define __writeb(x,a) tsunami_writeb(x,a)
#define __writew(x,a) tsunami_writew(x,a)
#define __writel(x,a) tsunami_writel(x,a)
#define __writeq(x,a) tsunami_writeq(x,a)
#define __ioremap(a,s) tsunami_ioremap(a,s)
#define __iounmap(a) tsunami_iounmap(a)
#define __is_ioaddr(a) tsunami_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb(x,p)
#define outw(x,p) __outw(x,p)
#define outl(x,p) __outl(x,p)
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX tsunami
#define tsunami_trivial_rw_bw 1
#define tsunami_trivial_rw_lq 1
#define tsunami_trivial_io_bw 1
#define tsunami_trivial_io_lq 1
#define tsunami_trivial_iounmap 1
#include <asm/io_trivial.h>
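A standalone sketch of the address-space convention the Tsunami helpers above (and the Titan/Wildfire ones nearby) rely on: a port cookie is just the port number plus an I/O bias, and tsunami_is_mmio distinguishes MMIO from port I/O by testing a single address bit. The bias value below is an illustrative placeholder; only the 1UL << 32 test mirrors the code above.

#include <stdint.h>
#include <stdio.h>

#define DEMO_IO_BIAS 0x801fc000000UL	/* stand-in for TSUNAMI_IO_BIAS */

/* Port cookies are just "port number + bias", as tsunami_ioportmap does. */
static uint64_t demo_ioportmap(unsigned long port)
{
	return port + DEMO_IO_BIAS;
}

/* MMIO vs port I/O is distinguished by bit 32 being clear,
   mirroring tsunami_is_mmio above. */
static int demo_is_mmio(uint64_t cookie)
{
	return (cookie & 0x100000000UL) == 0;
}

int main(void)
{
	uint64_t p = demo_ioportmap(0x1f0);
	printf("cookie %#lx is_mmio=%d\n", (unsigned long)p, demo_is_mmio(p));
	return 0;
}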
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/core_wildfire.h
...
...
@@ -273,69 +273,19 @@ typedef struct {
#define __IO_EXTERN_INLINE
#endif
#define vucp volatile unsigned char *
#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
#define vulp volatile unsigned long *
__EXTERN_INLINE u8 wildfire_inb(unsigned long addr)
{
	/* ??? I wish I could get rid of this.  But there's no ioremap
	   equivalent for I/O space.  PCI I/O can be forced into the
	   correct hose's I/O region, but that doesn't take care of
	   legacy ISA crap.  */
	addr += WILDFIRE_IO_BIAS;
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE void wildfire_outb(u8 b, unsigned long addr)
{
	addr += WILDFIRE_IO_BIAS;
	__kernel_stb(b, *(vucp)addr);
	mb();
}

__EXTERN_INLINE u16 wildfire_inw(unsigned long addr)
{
	addr += WILDFIRE_IO_BIAS;
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE void wildfire_outw(u16 b, unsigned long addr)
{
	addr += WILDFIRE_IO_BIAS;
	__kernel_stw(b, *(vusp)addr);
	mb();
}

__EXTERN_INLINE u32 wildfire_inl(unsigned long addr)
{
	addr += WILDFIRE_IO_BIAS;
	return *(vuip)addr;
}

__EXTERN_INLINE void wildfire_outl(u32 b, unsigned long addr)
{
	addr += WILDFIRE_IO_BIAS;
	*(vuip)addr = b;
	mb();
}
/*
* Memory functions. all accesses are done through linear space.
*/
__EXTERN_INLINE
void
__iomem
*
wildfire_ioremap
(
unsigned
long
addr
,
unsigned
long
size
__attribute__
((
unused
)))
__EXTERN_INLINE
void
__iomem
*
wildfire_ioportmap
(
unsigned
long
addr
)
{
return
(
void
__iomem
*
)(
addr
+
WILDFIRE_
MEM
_BIAS
);
return
(
void
__iomem
*
)(
addr
+
WILDFIRE_
IO
_BIAS
);
}
__EXTERN_INLINE
void
wildfire_iounmap
(
volatile
void
__iomem
*
addr
)
__EXTERN_INLINE
void
__iomem
*
wildfire_ioremap
(
unsigned
long
addr
,
unsigned
long
size
)
{
return
;
return
(
void
__iomem
*
)(
addr
+
WILDFIRE_MEM_BIAS
)
;
}
__EXTERN_INLINE
int
wildfire_is_ioaddr
(
unsigned
long
addr
)
...
...
@@ -343,87 +293,20 @@ __EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr)
	return addr >= WILDFIRE_BASE;
}

__EXTERN_INLINE u8 wildfire_readb(const volatile void __iomem *addr)
{
	return __kernel_ldbu(*(vucp)addr);
}

__EXTERN_INLINE u16 wildfire_readw(const volatile void __iomem *addr)
{
	return __kernel_ldwu(*(vusp)addr);
}

__EXTERN_INLINE u32 wildfire_readl(const volatile void __iomem *addr)
{
	return *(vuip)addr;
}

__EXTERN_INLINE u64 wildfire_readq(const volatile void __iomem *addr)
{
	return *(vulp)addr;
}

__EXTERN_INLINE void wildfire_writeb(u8 b, volatile void __iomem *addr)
{
	__kernel_stb(b, *(vucp)addr);
}

__EXTERN_INLINE void wildfire_writew(u16 b, volatile void __iomem *addr)
{
	__kernel_stw(b, *(vusp)addr);
}

__EXTERN_INLINE void wildfire_writel(u32 b, volatile void __iomem *addr)
{
	*(vuip)addr = b;
}
__EXTERN_INLINE void wildfire_writeq(u64 b, volatile void __iomem *addr)
__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr)
{
	*(vulp)addr = b;
	unsigned long addr = (unsigned long)xaddr;
	return (addr & 0x100000000UL) == 0;
}
#undef vucp
#undef vusp
#undef vuip
#undef vulp
#ifdef __WANT_IO_DEF
#define __inb(p) wildfire_inb((unsigned long)(p))
#define __inw(p) wildfire_inw((unsigned long)(p))
#define __inl(p) wildfire_inl((unsigned long)(p))
#define __outb(x,p) wildfire_outb(x,(unsigned long)(p))
#define __outw(x,p) wildfire_outw(x,(unsigned long)(p))
#define __outl(x,p) wildfire_outl(x,(unsigned long)(p))
#define __readb(a) wildfire_readb(a)
#define __readw(a) wildfire_readw(a)
#define __readl(a) wildfire_readl(a)
#define __readq(a) wildfire_readq(a)
#define __writeb(x,a) wildfire_writeb(x,a)
#define __writew(x,a) wildfire_writew(x,a)
#define __writel(x,a) wildfire_writel(x,a)
#define __writeq(x,a) wildfire_writeq(x,a)
#define __ioremap(a,s) wildfire_ioremap(a,s)
#define __iounmap(a) wildfire_iounmap(a)
#define __is_ioaddr(a) wildfire_is_ioaddr((unsigned long)(a))
#define inb(p) __inb(p)
#define inw(p) __inw(p)
#define inl(p) __inl(p)
#define outb(x,p) __outb(x,p)
#define outw(x,p) __outw(x,p)
#define outl(x,p) __outl(x,p)
#define __raw_readb(a) __readb(a)
#define __raw_readw(a) __readw(a)
#define __raw_readl(a) __readl(a)
#define __raw_readq(a) __readq(a)
#define __raw_writeb(v,a) __writeb(v,a)
#define __raw_writew(v,a) __writew(v,a)
#define __raw_writel(v,a) __writel(v,a)
#define __raw_writeq(v,a) __writeq(v,a)
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX wildfire
#define wildfire_trivial_rw_bw 1
#define wildfire_trivial_rw_lq 1
#define wildfire_trivial_io_bw 1
#define wildfire_trivial_io_lq 1
#define wildfire_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/io.h
...
...
@@ -3,6 +3,18 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/compiler.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
/* The generic header contains only prototypes. Including it ensures that
the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>
/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
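As a quick illustration of the interface those asm-generic/iomap.h prototypes describe, a driver maps a port range once and then goes through the ioreadN/iowriteN accessors. This is only a hypothetical sketch; the port number 0x3f8 and the register offsets are invented for the example:

	void __iomem *base = ioport_map(0x3f8, 8);	/* map 8 bytes of port I/O */
	u8 status = ioread8(base + 5);			/* read an assumed status register */
	iowrite8(0x01, base + 4);			/* write an assumed control register */
	ioport_unmap(base);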
...
...
@@ -16,13 +28,6 @@
#define IDENT_ADDR 0xfffffc0000000000UL
#endif
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
/*
* We try to avoid hae updates (thus the cache), but when we
* do need to update the hae, we need to do it atomically, so
...
...
@@ -89,6 +94,9 @@ static inline void * phys_to_virt(unsigned long address)
/* This depends on working iommu. */
#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff
/*
* Change addresses as seen by the kernel (virtual) to addresses as
* seen by a device (bus), and vice versa.
...
...
@@ -123,50 +131,77 @@ static inline void *bus_to_virt(unsigned long address)
* There are different chipsets to interface the Alpha CPUs to the world.
*/
#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b
#ifdef CONFIG_ALPHA_GENERIC
/* In a generic kernel, we always go through the machine vector. */
# define __inb(p) alpha_mv.mv_inb((unsigned long)(p))
# define __inw(p) alpha_mv.mv_inw((unsigned long)(p))
# define __inl(p) alpha_mv.mv_inl((unsigned long)(p))
# define __outb(x,p) alpha_mv.mv_outb(x,(unsigned long)(p))
# define __outw(x,p) alpha_mv.mv_outw(x,(unsigned long)(p))
# define __outl(x,p) alpha_mv.mv_outl(x,(unsigned long)(p))
# define __readb(a) alpha_mv.mv_readb(a)
# define __readw(a) alpha_mv.mv_readw(a)
# define __readl(a) alpha_mv.mv_readl(a)
# define __readq(a) alpha_mv.mv_readq(a)
# define __writeb(v,a) alpha_mv.mv_writeb(v,a)
# define __writew(v,a) alpha_mv.mv_writew(v,a)
# define __writel(v,a) alpha_mv.mv_writel(v,a)
# define __writeq(v,a) alpha_mv.mv_writeq(v,a)
# define __ioremap(a,s) alpha_mv.mv_ioremap(a,s)
# define __iounmap(a) alpha_mv.mv_iounmap(a)
# define __is_ioaddr(a) alpha_mv.mv_is_ioaddr((unsigned long)(a))
# define inb __inb
# define inw __inw
# define inl __inl
# define outb __outb
# define outw __outw
# define outl __outl
# define __raw_readb __readb
# define __raw_readw __readw
# define __raw_readl __readl
# define __raw_readq __readq
# define __raw_writeb __writeb
# define __raw_writew __writew
# define __raw_writel __writel
# define __raw_writeq __writeq
#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
return alpha_mv.mv_##NAME(addr); \
}
#else
#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
alpha_mv.mv_##NAME(b, addr); \
}
REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)
#undef REMAP1
#undef REMAP2
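For reference, an invocation such as REMAP1(u8, readb, const volatile) above expands to roughly the following machine-vector wrapper (a sketch of the preprocessor output):

	static inline u8 generic_readb(const volatile void __iomem *addr)
	{
		return alpha_mv.mv_readb(addr);
	}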
static inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}
/* Control how and what gets defined within the core logic headers. */
#define __WANT_IO_DEF
static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}
#define __IO_PREFIX generic
#define generic_trivial_rw_bw 0
#define generic_trivial_rw_lq 0
#define generic_trivial_io_bw 0
#define generic_trivial_io_lq 0
#define generic_trivial_iounmap 0
#else
#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
...
...
@@ -196,223 +231,280 @@ static inline void *bus_to_virt(unsigned long address)
#error "What system is this?"
#endif
#undef __WANT_IO_DEF
#endif
/* GENERIC */
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
* single underscore are proper functions, and names starting with a
* letter are macros that map in some way to inline or proper function
* versions. Not all that pretty, but before you change it, be sure
* to convince yourself that it won't break anything (in particular
* module support).
* We always have external versions of these routines.
*/
extern u8 _inb(unsigned long port);
extern u16 _inw(unsigned long port);
extern u32 _inl(unsigned long port);
extern void _outb(u8 b, unsigned long port);
extern void _outw(u16 w, unsigned long port);
extern void _outl(u32 l, unsigned long port);
extern u8 _readb(const volatile void __iomem *addr);
extern u16 _readw(const volatile void __iomem *addr);
extern u32 _readl(const volatile void __iomem *addr);
extern u64 _readq(const volatile void __iomem *addr);
extern void _writeb(u8 b, volatile void __iomem *addr);
extern void _writew(u16 b, volatile void __iomem *addr);
extern void _writel(u32 b, volatile void __iomem *addr);
extern void _writeq(u64 b, volatile void __iomem *addr);

extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);
extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);
/*
* The platform header files may define some of these macros to use
* the inlined versions where appropriate. These macros may also be
* redefined by userlevel programs.
* Mapping from port numbers to __iomem space is pretty easy.
*/
#ifndef inb
# define inb(p) _inb((unsigned long)(p))
#endif
#ifndef inw
# define inw(p) _inw((unsigned long)(p))
#endif
#ifndef inl
# define inl(p) _inl((unsigned long)(p))
#endif
#ifndef outb
# define outb(b,p) _outb(b,(unsigned long)(p))
#endif
#ifndef outw
# define outw(w,p) _outw(w,(unsigned long)(p))
#endif
#ifndef outl
# define outl(l,p) _outl(l,(unsigned long)(p))
#endif
#ifndef inb_p
# define inb_p inb
#endif
#ifndef inw_p
# define inw_p inw
#endif
#ifndef inl_p
# define inl_p inl
#endif
/* These two have to be extern inline so that we don't get redefinition
errors building lib/iomap.c. Which we don't want anyway, but... */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap)(port);
}
#ifndef outb_p
# define outb_p outb
#endif
#ifndef outw_p
# define outw_p outw
#endif
#ifndef outl_p
# define outl_p outl
#endif
extern inline void ioport_unmap(void __iomem *addr)
{
}
static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap)(port, size);
}

static inline void __iomem *ioremap_nocache(unsigned long offset,
					    unsigned long size)
{
	return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
#define IO_SPACE_LIMIT 0xffff
/*
* On Alpha, we have the whole of I/O space mapped at all times, but
* at odd and sometimes discontinuous addresses. Note that the
* discontinuities are all across busses, so we need not care for that
* for any one device.
*
* The DRM drivers need to be able to map contiguously a (potentially)
* discontiguous set of I/O pages. This set of pages is scatter-gather
* mapped contiguously from the perspective of the bus, but we can't
* directly access DMA addresses from the CPU, these addresses need to
* have a real ioremap. Therefore, iounmap and the size argument to
* ioremap are needed to give the platforms the ability to fully implement
* ioremap.
*
* Map the I/O space address into the kernel's virtual address space.
* If the actual I/O bits are sufficiently trivial, then expand inline.
*/
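In driver terms the comment above boils down to the usual map/access/unmap sequence. A minimal hypothetical sketch, with the bus address and register offset invented for illustration:

	void __iomem *regs = ioremap(dev_bus_addr, 0x1000);	/* dev_bus_addr: assumed example */
	u32 id = readl(regs);					/* readl() issues mb() after the load */
	writel(0x1, regs + 0x10);				/* writel() issues mb() after the store */
	iounmap(regs);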
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
	return __ioremap(offset, size);
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}
static inline void iounmap(volatile void __iomem *addr)
extern inline unsigned int ioread16(void __iomem *addr)
{
	__iounmap(addr);
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}
static inline void __iomem *ioremap_nocache(unsigned long offset,
					    unsigned long size)
extern inline void iowrite8(u8 b, void __iomem *addr)
{
	return ioremap(offset, size);
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
}
/* Indirect back to the macros provided. */
extern inline void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
}
extern u8 ___raw_readb(const volatile void __iomem *addr);
extern u16 ___raw_readw(const volatile void __iomem *addr);
extern u32 ___raw_readl(const volatile void __iomem *addr);
extern u64 ___raw_readq(const volatile void __iomem *addr);
extern void ___raw_writeb(u8 b, volatile void __iomem *addr);
extern void ___raw_writew(u16 b, volatile void __iomem *addr);
extern void ___raw_writel(u32 b, volatile void __iomem *addr);
extern void ___raw_writeq(u64 b, volatile void __iomem *addr);
extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}
#ifdef __raw_readb
# define readb(a) ({ u8 r_ = __raw_readb(a); mb(); r_; })
#endif
#ifdef __raw_readw
# define readw(a) ({ u16 r_ = __raw_readw(a); mb(); r_; })
#endif
#ifdef __raw_readl
# define readl(a) ({ u32 r_ = __raw_readl(a); mb(); r_; })
#endif
#ifdef __raw_readq
# define readq(a) ({ u64 r_ = __raw_readq(a); mb(); r_; })
#endif
extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}
#ifdef __raw_writeb
# define writeb(v,a) ({ __raw_writeb(v,a); mb(); })
#endif
#ifdef __raw_writew
# define writew(v,a) ({ __raw_writew(v,a); mb(); })
#endif
#ifdef __raw_writel
# define writel(v,a) ({ __raw_writel(v,a); mb(); })
#endif
#ifdef __raw_writeq
# define writeq(v,a) ({ __raw_writeq(v,a); mb(); })
#endif
extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}
#ifndef __raw_readb
# define __raw_readb(a) ___raw_readb(a)
#endif
#ifndef __raw_readw
# define __raw_readw(a) ___raw_readw(a)
#endif
#ifndef __raw_readl
# define __raw_readl(a) ___raw_readl(a)
#endif
#ifndef __raw_readq
# define __raw_readq(a) ___raw_readq(a)
extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif
#ifndef __raw_writeb
# define __raw_writeb(v,a) ___raw_writeb(v,a)
#endif
#ifndef __raw_writew
# define __raw_writew(v,a) ___raw_writew(v,a)
#endif
#ifndef __raw_writel
# define __raw_writel(v,a) ___raw_writel(v,a)
#endif
#ifndef __raw_writeq
# define __raw_writeq(v,a) ___raw_writeq(v,a)
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}
#ifndef readb
# define readb(a) _readb(a)
#endif
#ifndef readw
# define readw(a) _readw(a)
extern inline void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
}
extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif
#ifndef readl
# define readl(a) _readl(a)
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}
#endif
#ifndef readq
# define readq(a) _readq(a)
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}
#endif
#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)
#ifndef writeb
# define writeb(v,a) _writeb(v,a)
#endif
#ifndef writew
# define writew(v,a) _writew(v,a)
#endif
#ifndef writel
# define writel(v,a) _writel(v,a)
#endif
#ifndef writeq
# define writeq(v,a) _writeq(v,a)
#endif
/*
* String version of IO memory access ops:
*/
extern void _memcpy_fromio(void *, const volatile void __iomem *, long);
extern void _memcpy_toio(volatile void __iomem *, const void *, long);
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
#define memcpy_fromio(to,from,len) \
_memcpy_fromio(to,from,len)
#define memcpy_toio(to,from,len) \
_memcpy_toio(to,from,len)
#define memset_io(addr,c,len) \
_memset_c_io(addr,0x0101010101010101UL*(u8)(c),len)
static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
#define memsetw_io(addr,c,len) \
  _memset_c_io(addr,0x0001000100010001UL*(u16)(c),len)

static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
/*
* String versions of in/out ops:
...
...
@@ -458,72 +550,85 @@ check_signature(const volatile void __iomem *io_addr,
static inline u8 isa_readb(unsigned long offset)
{
	return readb(__ioremap(offset, 1));
	void __iomem *addr = ioremap(offset, 1);
	u8 ret = readb(addr);
	iounmap(addr);
	return ret;
}

static inline u16 isa_readw(unsigned long offset)
{
	return readw(__ioremap(offset, 2));
	void __iomem *addr = ioremap(offset, 2);
	u16 ret = readw(addr);
	iounmap(addr);
	return ret;
}
static inline u32 isa_readl(unsigned long offset)
{
	return readl(__ioremap(offset, 4));
	void __iomem *addr = ioremap(offset, 4);
	u32 ret = readl(addr);
	iounmap(addr);
	return ret;
}
static inline void isa_writeb(u8 b, unsigned long offset)
{
	writeb(b, __ioremap(offset, 1));
	void __iomem *addr = ioremap(offset, 1);
	writeb(b, addr);
	iounmap(addr);
}
static inline void isa_writew(u16 w, unsigned long offset)
{
	writew(w, __ioremap(offset, 2));
	void __iomem *addr = ioremap(offset, 2);
	writew(w, addr);
	iounmap(addr);
}
static inline void isa_writel(u32 l, unsigned long offset)
{
	writel(l, __ioremap(offset, 4));
	void __iomem *addr = ioremap(offset, 4);
	writel(l, addr);
	iounmap(addr);
}
static inline void isa_memset_io(unsigned long offset, u8 val, long n)
{
	memset_io(__ioremap(offset, n), val, n);
	void __iomem *addr = ioremap(offset, n);
	memset_io(addr, val, n);
	iounmap(addr);
}

static inline void isa_memcpy_fromio(void *dest, unsigned long offset, long n)
{
	memcpy_fromio(dest, __ioremap(offset, n), n);
	void __iomem *addr = ioremap(offset, n);
	memcpy_fromio(dest, addr, n);
	iounmap(addr);
}

static inline void isa_memcpy_toio(unsigned long offset, const void *src, long n)
{
	memcpy_toio(__ioremap(offset, n), src, n);
	void __iomem *addr = ioremap(offset, n);
	memcpy_toio(addr, src, n);
	iounmap(addr);
}
static inline int
isa_check_signature(unsigned long io_addr, const unsigned char *signature, long length)
isa_check_signature(unsigned long offset, const unsigned char *sig, long len)
{
	int retval = 0;
	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
	void __iomem *addr = ioremap(offset, len);
	int ret = check_signature(addr, sig, len);
	iounmap(addr);
	return ret;
}
...
...
include/asm-alpha/io_trivial.h
0 → 100644
/* Trivial implementations of basic i/o routines. Assumes that all
of the hard work has been done by ioremap and ioportmap, and that
access to i/o space is linear. */
/* This file may be included multiple times. */
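The chipset headers earlier in this commit (core_tsunami.h, core_wildfire.h) show the intended usage: pick a prefix, mark which accessor groups are trivial, and include this file to have the corresponding IO_CONCAT routines generated. A condensed sketch of that pattern, with an invented "mychip" prefix:

	#undef  __IO_PREFIX
	#define __IO_PREFIX		mychip	/* hypothetical chipset prefix */
	#define mychip_trivial_rw_bw	1	/* readb/readw, writeb/writew are plain loads/stores */
	#define mychip_trivial_rw_lq	1	/* readl/readq, writel/writeq likewise */
	#define mychip_trivial_io_bw	1	/* ioread8/ioread16, iowrite8/iowrite16 */
	#define mychip_trivial_io_lq	1	/* ioread32, iowrite32 */
	#define mychip_trivial_iounmap	1	/* iounmap is a no-op */
	#include <asm/io_trivial.h>		/* generates mychip_readb(), mychip_ioread8(), ... */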
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
{
	return __kernel_ldbu(*(volatile u8 __force *)a);
}

__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
{
	return __kernel_ldwu(*(volatile u16 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
{
	return *(volatile u32 __force *)a;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return __kernel_ldbu(*(const volatile u8 __force *)a);
}

__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return __kernel_ldwu(*(const volatile u16 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread8)((void __iomem *)a);
}

__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread16)((void __iomem *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, (void __iomem *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, (void __iomem *)a);
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
__EXTERN_INLINE u32
IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a)
{
	return *(const volatile u32 __force *)a;
}

__EXTERN_INLINE u64
IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a)
{
	return *(const volatile u64 __force *)a;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a)
{
	*(volatile u64 __force *)a = b;
}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_iounmap)
__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a)
{
}
#endif
include/asm-alpha/jensen.h
...
...
@@ -279,15 +279,15 @@ __EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
	*(vuip)(addr + (4 << 7)) = b >> 32;
}
__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
					     unsigned long size)
__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
{
	return (void __iomem *)addr;
}

__EXTERN_INLINE void jensen_iounmap(volatile void __iomem *addr)
__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
					     unsigned long size)
{
	return;
	return (void __iomem *)(addr + 0x100000000ul);
}

__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
...
...
@@ -295,39 +295,46 @@ __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
	return (long)addr >= 0;
}

#undef vuip

__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= 0x100000000ul;
}
#ifdef __WANT_IO_DEF
#define __inb jensen_inb
#define __inw jensen_inw
#define __inl jensen_inl
#define __outb jensen_outb
#define __outw jensen_outw
#define __outl jensen_outl
#define __readb jensen_readb
#define __readw jensen_readw
#define __writeb jensen_writeb
#define __writew jensen_writew
#define __readl jensen_readl
#define __readq jensen_readq
#define __writel jensen_writel
#define __writeq jensen_writeq
#define __ioremap jensen_ioremap
#define __iounmap jensen_iounmap
#define __is_ioaddr(a) jensen_is_ioaddr((unsigned long)(a))
/* New-style ioread interface. All the routines are so ugly for Jensen
that it doesn't make sense to merge them. */
#define IOPORT(OS, NS) \
__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \
{ \
if (jensen_is_mmio(xaddr)) \
return jensen_read##OS(xaddr - 0x100000000ul); \
else \
return jensen_in##OS((unsigned long)xaddr); \
} \
__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \
{ \
if (jensen_is_mmio(xaddr)) \
jensen_write##OS(b, xaddr - 0x100000000ul); \
else \
jensen_out##OS(b, (unsigned long)xaddr); \
}
/*
* The above have so much overhead that it probably doesn't make
* sense to have them inlined (better icache behaviour).
*/
#define inb(port) \
(__builtin_constant_p(port)?__inb(port):_inb(port))
IOPORT(b, 8)
IOPORT(w, 16)
IOPORT(l, 32)
#undef IOPORT
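For reference, IOPORT(b, 8) above expands to roughly this pair of routines (a sketch of the preprocessor output):

	__EXTERN_INLINE unsigned int jensen_ioread8(void __iomem *xaddr)
	{
		if (jensen_is_mmio(xaddr))
			return jensen_readb(xaddr - 0x100000000ul);
		else
			return jensen_inb((unsigned long)xaddr);
	}
	__EXTERN_INLINE void jensen_iowrite8(u8 b, void __iomem *xaddr)
	{
		if (jensen_is_mmio(xaddr))
			jensen_writeb(b, xaddr - 0x100000000ul);
		else
			jensen_outb(b, (unsigned long)xaddr);
	}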
#define outb(x, port) \
(__builtin_constant_p(port)?__outb(x,port):_outb(x,port))
#undef vuip
#endif
/* __WANT_IO_DEF */
#undef __IO_PREFIX
#define __IO_PREFIX jensen
#define jensen_trivial_rw_bw 0
#define jensen_trivial_rw_lq 0
#define jensen_trivial_io_bw 0
#define jensen_trivial_io_lq 0
#define jensen_trivial_iounmap 1
#include <asm/io_trivial.h>
#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
...
...
include/asm-alpha/machvec.h
...
...
@@ -45,13 +45,13 @@ struct alpha_machine_vector
	void (*mv_pci_tbi)(struct pci_controller *hose,
			   dma_addr_t start, dma_addr_t end);

	u8 (*mv_inb)(unsigned long);
	u16 (*mv_inw)(unsigned long);
	u32 (*mv_inl)(unsigned long);
	unsigned int (*mv_ioread8)(void __iomem *);
	unsigned int (*mv_ioread16)(void __iomem *);
	unsigned int (*mv_ioread32)(void __iomem *);

	void (*mv_outb)(u8, unsigned long);
	void (*mv_outw)(u16, unsigned long);
	void (*mv_outl)(u32, unsigned long);
	void (*mv_iowrite8)(u8, void __iomem *);
	void (*mv_iowrite16)(u16, void __iomem *);
	void (*mv_iowrite32)(u32, void __iomem *);

	u8 (*mv_readb)(const volatile void __iomem *);
	u16 (*mv_readw)(const volatile void __iomem *);
...
...
@@ -63,9 +63,11 @@ struct alpha_machine_vector
	void (*mv_writel)(u32, volatile void __iomem *);
	void (*mv_writeq)(u64, volatile void __iomem *);

	void __iomem *(*mv_ioportmap)(unsigned long);
	void __iomem *(*mv_ioremap)(unsigned long, unsigned long);
	void (*mv_iounmap)(volatile void __iomem *);
	int (*mv_is_ioaddr)(unsigned long);
	int (*mv_is_mmio)(const volatile void __iomem *);

	void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
			     struct task_struct *);
...
...
include/asm-alpha/mmu_context.h
...
...
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <asm/system.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
/*
* Force a context reload. This is needed when we change the page
...
...
include/asm-alpha/spinlock.h
...
...
@@ -124,7 +124,7 @@ static inline void _raw_write_lock(rwlock_t * lock)
" br 1b
\n
"
".previous"
:
"=m"
(
*
lock
),
"=&r"
(
regx
)
:
"
0
"
(
*
lock
)
:
"memory"
);
:
"
m
"
(
*
lock
)
:
"memory"
);
}
static
inline
void
_raw_read_lock
(
rwlock_t
*
lock
)
...
...
include/asm-alpha/tlbflush.h
...
...
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
...
...