Commit ae02e964 authored Mar 21, 2006 by Tony Luck
Pull icc-cleanup into release branch
Parents: 409761bb dcc1dd23
Showing 4 changed files with 95 additions and 169 deletions (+95 -169)
arch/ia64/sn/kernel/Makefile        +2   -1
arch/ia64/sn/kernel/pio_phys.S      +71  -0
include/asm-ia64/intel_intrin.h     +17  -117
include/asm-ia64/sn/rw_mmr.h        +5   -51

arch/ia64/sn/kernel/Makefile

@@ -10,7 +10,8 @@
 CPPFLAGS			+= -I$(srctree)/arch/ia64/sn/include

 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
-				   huberror.o io_init.o iomv.o klconflib.o sn2/
+				   huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+				   sn2/
 obj-$(CONFIG_IA64_GENERIC)	+= machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
 obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o

arch/ia64/sn/kernel/pio_phys.S (new file, mode 0 → 100644)

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2005 Silicon Graphics, Inc.  All rights reserved.
 *
 * This file contains macros used to access MMR registers via
 * uncached physical addresses.
 *      pio_phys_read_mmr  - read an MMR
 *      pio_phys_write_mmr - write an MMR
 *      pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
 *              Second MMR will be skipped if address is NULL
 *
 * Addresses passed to these routines should be uncached physical addresses
 * ie., 0x80000....
 */

#include <asm/asmmacro.h>
#include <asm/page.h>

GLOBAL_ENTRY(pio_phys_read_mmr)
	.prologue
	.regstk 1,0,0,0
	.body
	mov r2=psr
	rsm psr.i | psr.dt
	;;
	srlz.d
	ld8.acq r8=[r32]
	;;
	mov psr.l=r2
	;;
	srlz.d
	br.ret.sptk.many rp
END(pio_phys_read_mmr)

GLOBAL_ENTRY(pio_phys_write_mmr)
	.prologue
	.regstk 2,0,0,0
	.body
	mov r2=psr
	rsm psr.i | psr.dt
	;;
	srlz.d
	st8.rel [r32]=r33
	;;
	mov psr.l=r2
	;;
	srlz.d
	br.ret.sptk.many rp
END(pio_phys_write_mmr)

GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
	.prologue
	.regstk 4,0,0,0
	.body
	mov r2=psr
	cmp.ne p9,p0=r34,r0;
	rsm psr.i | psr.dt | psr.ic
	;;
	srlz.d
	st8.rel [r32]=r33
(p9)	st8.rel [r34]=r35
	;;
	mov psr.l=r2
	;;
	srlz.d
	br.ret.sptk.many rp
END(pio_atomic_phys_write_mmrs)
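
The header comment above spells out the three entry points this new file exports. As a rough usage sketch only (not part of this commit), a C caller would go through the prototypes that include/asm-ia64/sn/rw_mmr.h declares later in this diff; the mmr_phys argument below is a placeholder for a real uncached physical MMR address (0x80000....):

/* Illustrative sketch, not from the commit: mmr_phys stands in for an
 * uncached physical MMR address (0x80000....); passing NULL as the second
 * address skips the second store in pio_atomic_phys_write_mmrs, per the
 * comment at the top of this file. */
#include <linux/stddef.h>
#include <asm/sn/rw_mmr.h>

static void example_mmr_poke(volatile long *mmr_phys, long bits)
{
	long old = pio_phys_read_mmr(mmr_phys);		/* ld8.acq with psr.i/psr.dt cleared */

	pio_phys_write_mmr(mmr_phys, old | bits);	/* st8.rel with psr.i/psr.dt cleared */

	/* Single atomic MMR write with psr.ic = 0; the second MMR is skipped via NULL. */
	pio_atomic_phys_write_mmrs(mmr_phys, bits, NULL, 0);
}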

include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
  *
  * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
  *
  */
-#include <asm/types.h>
-
-void  __lfetch(int lfhint, void *y);
-void  __lfetch_excl(int lfhint, void *y);
-void  __lfetch_fault(int lfhint, void *y);
-void  __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void  __ldfs(const int whichFloatReg, void *src);
-void  __ldfd(const int whichFloatReg, void *src);
-void  __ldfe(const int whichFloatReg, void *src);
-void  __ldf8(const int whichFloatReg, void *src);
-void  __ldf_fill(const int whichFloatReg, void *src);
-void  __stfs(void *dst, const int whichFloatReg);
-void  __stfd(void *dst, const int whichFloatReg);
-void  __stfe(void *dst, const int whichFloatReg);
-void  __stf8(void *dst, const int whichFloatReg);
-void  __stf_spill(void *dst, const int whichFloatReg);
-
-void  __st1_rel(void *dst, const __s8  value);
-void  __st2_rel(void *dst, const __s16 value);
-void  __st4_rel(void *dst, const __s32 value);
-void  __st8_rel(void *dst, const __s64 value);
-__u8  __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
-   the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void  __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void  __setReg(const int whichReg, __u64 value);
-
-void  __mf(void);
-void  __mfa(void);
-void  __synci(void);
-void  __itcd(__s64 pa);
-void  __itci(__s64 pa);
-void  __itrd(__s64 whichTransReg, __s64 pa);
-void  __itri(__s64 whichTransReg, __s64 pa);
-void  __ptce(__s64 va);
-void  __ptcl(__s64 va, __s64 pagesz);
-void  __ptcg(__s64 va, __s64 pagesz);
-void  __ptcga(__s64 va, __s64 pagesz);
-void  __ptri(__s64 va, __s64 pagesz);
-void  __ptrd(__s64 va, __s64 pagesz);
-void  __invala(void);
-void  __invala_gr(const int whichGeneralReg /* 0-127 */);
-void  __invala_fr(const int whichFloatReg /* 0-127 */);
-void  __nop(const int);
-void  __fc(__u64 *addr);
-void  __sum(int mask);
-void  __rum(int mask);
-void  __ssm(int mask);
-void  __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void  __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void  __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void  __memory_barrier(void);
-
-void  __isrlz(void);
-void  __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>

 #define ia64_barrier()		__memory_barrier()
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_getreg		__getReg
 #define ia64_setreg		__setReg

-#define ia64_hint(x)
+#define ia64_hint		__hint
+#define ia64_hint_pause	__hint_pause

-#define ia64_mux1_brcst	 0
-#define ia64_mux1_mix		 8
-#define ia64_mux1_shuf	 9
-#define ia64_mux1_alt		10
-#define ia64_mux1_rev		11
+#define ia64_mux1_brcst	_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf	_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev

-#define ia64_mux1		_m64_mux1
+#define ia64_mux1(x,v)		_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))

 #define ia64_popcnt		_m64_popcnt
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_stf8		__stf8
 #define ia64_stf_spill	__stf_spill

-#define ia64_mf	__mf
+#define ia64_mf		__mf

 #define ia64_mfa		__mfa

 #define ia64_fetchadd4_acq	__fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
 /* Values for lfhint in __lfetch and __lfetch_fault */

-#define ia64_lfhint_none	0
-#define ia64_lfhint_nt1	1
-#define ia64_lfhint_nt2	2
-#define ia64_lfhint_nta	3
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1	__lfhint_nt1
+#define ia64_lfhint_nt2	__lfhint_nt2
+#define ia64_lfhint_nta	__lfhint_nta

 #define ia64_lfetch		__lfetch
 #define ia64_lfetch_excl	__lfetch_excl

@@ -254,4 +152,6 @@ do { \
 	} \
 } while (0)

 #define __builtin_trap()	__break(0);

 #endif /* _ASM_IA64_INTEL_INTRIN_H */
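
These defines let generic kernel code keep using the ia64_* accessor macros regardless of compiler; when the kernel is built with the Intel compiler, asm/intrinsics.h pulls in this header and the macros resolve to the __* intrinsics from ia64intrin.h. A minimal sketch under those assumptions (register names such as _IA64_REG_IP come from asm/ia64regs.h, as the removed comment block mentions; the helper functions below are illustrative only, not part of this commit):

/* Sketch only, not part of this commit. */
#include <asm/types.h>
#include <asm/intrinsics.h>

static inline unsigned long example_read_ip(void)
{
	return ia64_getreg(_IA64_REG_IP);	/* -> __getReg(_IA64_REG_IP) under icc */
}

static inline __u64 example_counter_bump(__u32 *counter)
{
	return ia64_fetchadd4_acq(counter, 1);	/* -> __fetchadd4_acq(counter, 1) under icc */
}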

include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
  */

 #ifndef _ASM_IA64_SN_RW_MMR_H
 #define _ASM_IA64_SN_RW_MMR_H

 /*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file that access MMRs via uncached physical addresses.
  *	pio_phys_read_mmr  - read an MMR
  *	pio_phys_write_mmr - write an MMR
  *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
  */

-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
-	long val;
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "ld8.acq %0=[%1];;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     : "=r" (val)
-	     : "r" (mmr)
-	     : "r2");
-	return val;
-}
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r" (mmr), "r" (val)
-	     : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt | psr.ic;;"
-	     "cmp.ne p9,p0=%2,r0;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;"
-	     "(p9) st8.rel [%2]=%3;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r" (mmr1), "r" (val1), "r" (mmr2), "r" (val2)
-	     : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);

 #endif /* _ASM_IA64_SN_RW_MMR_H */