Commit 9ca04f9f, authored Sep 05, 2003 by David Mosberger

    ia64: Finish adding ECC support. Based on patch by Suresh Siddha.

Parent: 76d523bc

Showing 6 changed files with 418 additions and 53 deletions:
    arch/ia64/ia32/elfcore32.h         +3    -2
    arch/ia64/scripts/toolchain-flags  +1    -1
    include/asm-ia64/acpi.h            +28   -40
    include/asm-ia64/intel_intrin.h    +254  -0   (new file)
    include/asm-ia64/spinlock.h        +45   -10
    include/asm-ia64/uaccess.h         +87   -0
arch/ia64/ia32/elfcore32.h

@@ -8,6 +8,8 @@
 #ifndef _ELFCORE32_H_
 #define _ELFCORE32_H_
 
+#include <asm/intrinsics.h>
+
 #define USE_ELF_CORE_DUMP 1
 
 /* Override elfcore.h */
@@ -79,8 +81,7 @@ struct elf_prpsinfo
 	pr_reg[11] = regs->r1; \
 	pr_reg[12] = regs->cr_iip; \
 	pr_reg[13] = regs->r17 & 0xffff; \
-	asm volatile ("mov %0=ar.eflag ;;" \
-		      : "=r"(pr_reg[14])); \
+	pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \
 	pr_reg[15] = regs->r12; \
 	pr_reg[16] = (regs->r17 >> 16) & 0xffff;
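Note: the change here is mechanical. An open-coded GCC asm that read ar.eflag is replaced by the compiler-neutral ia64_getreg() intrinsic, pulled in via the new <asm/intrinsics.h> include. A minimal sketch of the before/after shapes (illustrative, not from the patch; function names are mine):

	/* GCC-only form, as removed above */
	static unsigned long read_ar_eflag_gcc(void)
	{
		unsigned long v;
		asm volatile ("mov %0=ar.eflag" : "=r"(v));
		return v;
	}

	/* Compiler-neutral form, as added above; under the Intel compiler,
	 * ia64_getreg expands to __getReg via intel_intrin.h (see below). */
	static unsigned long read_ar_eflag_portable(void)
	{
		return ia64_getreg(_IA64_REG_AR_EFLAG);
	}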
arch/ia64/scripts/toolchain-flags

@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations.
 EOF
 fi
 
-if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored'
+if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep __model__ | grep -q attrib
 then
 	CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
 fi
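Note: the grep is loosened because 'attribute directive ignored' is GCC's exact diagnostic wording; piping through `grep __model__ | grep -q attrib` keys on the attribute name instead, so other compilers' phrasing of the warning is caught too. check-model.c itself is not part of this diff; a plausible minimal probe would be a declaration carrying the attribute, for example:

	/* Hypothetical sketch of a probe like check-model.c (the file is not
	 * shown in this diff): if the compiler does not support the small
	 * data model attribute, compiling this emits a diagnostic naming
	 * __model__, which the script detects. */
	int __attribute__ ((__model__ (__small__))) probe;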
include/asm-ia64/acpi.h

@@ -54,47 +54,35 @@
 #define ACPI_ENABLE_IRQS()  local_irq_enable()
 #define ACPI_FLUSH_CPU_CACHE()
 
-#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
-	do { \
-		__asm__ volatile ("1:  ld4      r29=[%1]\n"  \
-			";;\n"                  \
-			"mov    ar.ccv=r29\n"   \
-			"mov    r2=r29\n"       \
-			"shr.u  r30=r29,1\n"    \
-			"and    r29=-4,r29\n"   \
-			";;\n"                  \
-			"add    r29=2,r29\n"    \
-			"and    r30=1,r30\n"    \
-			";;\n"                  \
-			"add    r29=r29,r30\n"  \
-			";;\n"                  \
-			"cmpxchg4.acq   r30=[%1],r29,ar.ccv\n" \
-			";;\n"                  \
-			"cmp.eq p6,p7=r2,r30\n" \
-			"(p7) br.dpnt.few 1b\n" \
-			"cmp.gt p8,p9=3,r29\n"  \
-			";;\n"                  \
-			"(p8) mov %0=-1\n"      \
-			"(p9) mov %0=r0\n"      \
-			:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
-	} while (0)
+static inline int
+acpi_acquire_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
 
-#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
-	do { \
-		__asm__ volatile ("1:  ld4      r29=[%1]\n" \
-			";;\n"                  \
-			"mov    ar.ccv=r29\n"   \
-			"mov    r2=r29\n"       \
-			"and    r29=-4,r29\n"   \
-			";;\n"                  \
-			"cmpxchg4.acq   r30=[%1],r29,ar.ccv\n" \
-			";;\n"                  \
-			"cmp.eq p6,p7=r2,r30\n" \
-			"(p7) br.dpnt.few 1b\n" \
-			"and    %0=1,r2\n"      \
-			";;\n"                  \
-			:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
-	} while (0)
+static inline int
+acpi_release_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
+	((Acq) = acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
+	((Acq) = acpi_release_global_lock((unsigned int *) GLptr))
 
 const char *acpi_get_sysname (void);
 int acpi_request_vector (u32 int_type);
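Note: rewriting the Global Lock operations as plain C over ia64_cmpxchg4_acq() makes them buildable by both compilers and easier to check against the ACPI spec, where bit 0 of the lock word is "pending" and bit 1 is "owned". Acquire always sets owned, and also sets pending when the lock was already owned; `new < 3` therefore means pending stayed clear, i.e. the lock was taken outright (return -1, true), otherwise the caller must wait for the firmware's release notification (return 0). Release clears both bits and hands back the old pending bit so the caller knows whether anyone must be signalled. A compiler-portable rendering of the acquire loop (illustrative; a GCC builtin stands in for ia64_cmpxchg4_acq, and the function name is mine):

	#include <stdint.h>

	static int demo_acquire_global_lock(volatile uint32_t *lock)
	{
		uint32_t old, newval;
		do {
			old = *lock;
			/* set "owned" (bit 1); set "pending" (bit 0) iff already owned */
			newval = ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
		} while (__sync_val_compare_and_swap(lock, old, newval) != old);
		return (newval < 3) ? -1 : 0;	/* -1: acquired; 0: wait for release event */
	}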
include/asm-ia64/intel_intrin.h  (new file, 0 → 100644)

#ifndef _ASM_IA64_INTEL_INTRIN_H
#define _ASM_IA64_INTEL_INTRIN_H
/*
 * Intel Compiler Intrinsics
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 *
 */
#include <asm/types.h>

void  __lfetch(int lfhint, void *y);
void  __lfetch_excl(int lfhint, void *y);
void  __lfetch_fault(int lfhint, void *y);
void  __lfetch_fault_excl(int lfhint, void *y);

/* In the following, whichFloatReg should be an integer from 0-127 */
void  __ldfs(const int whichFloatReg, void *src);
void  __ldfd(const int whichFloatReg, void *src);
void  __ldfe(const int whichFloatReg, void *src);
void  __ldf8(const int whichFloatReg, void *src);
void  __ldf_fill(const int whichFloatReg, void *src);
void  __stfs(void *dst, const int whichFloatReg);
void  __stfd(void *dst, const int whichFloatReg);
void  __stfe(void *dst, const int whichFloatReg);
void  __stf8(void *dst, const int whichFloatReg);
void  __stf_spill(void *dst, const int whichFloatReg);

void  __st1_rel(void *dst, const __s8 value);
void  __st2_rel(void *dst, const __s16 value);
void  __st4_rel(void *dst, const __s32 value);
void  __st8_rel(void *dst, const __s64 value);
__u8  __ld1_acq(void *src);
__u16 __ld2_acq(void *src);
__u32 __ld4_acq(void *src);
__u64 __ld8_acq(void *src);

__u64 __fetchadd4_acq(__u32 *addend, const int increment);
__u64 __fetchadd4_rel(__u32 *addend, const int increment);
__u64 __fetchadd8_acq(__u64 *addend, const int increment);
__u64 __fetchadd8_rel(__u64 *addend, const int increment);

__u64 __getf_exp(double d);

/* OS Related Itanium(R) Intrinsics */

/* The names to use for whichReg and whichIndReg below come from
   the include file asm/ia64regs.h */

__u64 __getIndReg(const int whichIndReg, __s64 index);
__u64 __getReg(const int whichReg);

void  __setIndReg(const int whichIndReg, __s64 index, __u64 value);
void  __setReg(const int whichReg, __u64 value);

void  __mf(void);
void  __mfa(void);
void  __synci(void);
void  __itcd(__s64 pa);
void  __itci(__s64 pa);
void  __itrd(__s64 whichTransReg, __s64 pa);
void  __itri(__s64 whichTransReg, __s64 pa);
void  __ptce(__s64 va);
void  __ptcl(__s64 va, __s64 pagesz);
void  __ptcg(__s64 va, __s64 pagesz);
void  __ptcga(__s64 va, __s64 pagesz);
void  __ptri(__s64 va, __s64 pagesz);
void  __ptrd(__s64 va, __s64 pagesz);
void  __invala(void);
void  __invala_gr(const int whichGeneralReg /* 0-127 */);
void  __invala_fr(const int whichFloatReg /* 0-127 */);
void  __nop(const int);
void  __fc(__u64 *addr);
void  __sum(int mask);
void  __rum(int mask);
void  __ssm(int mask);
void  __rsm(int mask);
__u64 __thash(__s64);
__u64 __ttag(__s64);
__s64 __tpa(__s64);

/* Intrinsics for implementing get/put_user macros */
void  __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
void  __ld_user(const char *tableName, __u64 addr, char size, char relocType);

/* This intrinsic does not generate code, it creates a barrier across which
 * the compiler will not schedule data access instructions.
 */
void  __memory_barrier(void);
void  __isrlz(void);
void  __dsrlz(void);

__u64 _m64_mux1(__u64 a, const int n);

/* Lock and Atomic Operation Related Intrinsics */
__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);

__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);

__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
__s64 _m64_shrp(__s64 a, __s64 b, const int count);
__s64 _m64_popcnt(__s64 a);

#define ia64_barrier()		__memory_barrier()
#define ia64_stop()	/* Nothing: As of now stop bit is generated for each
			 * intrinsic
			 */
#define ia64_getreg		__getReg
#define ia64_setreg		__setReg
#define ia64_hint(x)

#define ia64_mux1_brcst	 0
#define ia64_mux1_mix	 8
#define ia64_mux1_shuf	 9
#define ia64_mux1_alt	10
#define ia64_mux1_rev	11

#define ia64_mux1		_m64_mux1
#define ia64_popcnt		_m64_popcnt
#define ia64_getf_exp		__getf_exp
#define ia64_shrp		_m64_shrp

#define ia64_tpa		__tpa
#define ia64_invala		__invala
#define ia64_invala_gr		__invala_gr
#define ia64_invala_fr		__invala_fr
#define ia64_nop		__nop
#define ia64_sum		__sum
#define ia64_ssm		__ssm
#define ia64_rum		__rum
#define ia64_rsm		__rsm
#define ia64_fc			__fc

#define ia64_ldfs		__ldfs
#define ia64_ldfd		__ldfd
#define ia64_ldfe		__ldfe
#define ia64_ldf8		__ldf8
#define ia64_ldf_fill		__ldf_fill

#define ia64_stfs		__stfs
#define ia64_stfd		__stfd
#define ia64_stfe		__stfe
#define ia64_stf8		__stf8
#define ia64_stf_spill		__stf_spill

#define ia64_mf			__mf
#define ia64_mfa		__mfa

#define ia64_fetchadd4_acq	__fetchadd4_acq
#define ia64_fetchadd4_rel	__fetchadd4_rel
#define ia64_fetchadd8_acq	__fetchadd8_acq
#define ia64_fetchadd8_rel	__fetchadd8_rel

#define ia64_xchg1		_InterlockedExchange8
#define ia64_xchg2		_InterlockedExchange16
#define ia64_xchg4		_InterlockedExchange
#define ia64_xchg8		_InterlockedExchange64

#define ia64_cmpxchg1_rel	_InterlockedCompareExchange8_rel
#define ia64_cmpxchg1_acq	_InterlockedCompareExchange8_acq
#define ia64_cmpxchg2_rel	_InterlockedCompareExchange16_rel
#define ia64_cmpxchg2_acq	_InterlockedCompareExchange16_acq
#define ia64_cmpxchg4_rel	_InterlockedCompareExchange_rel
#define ia64_cmpxchg4_acq	_InterlockedCompareExchange_acq
#define ia64_cmpxchg8_rel	_InterlockedCompareExchange64_rel
#define ia64_cmpxchg8_acq	_InterlockedCompareExchange64_acq

#define __ia64_set_dbr(index, val)	\
		__setIndReg(_IA64_REG_INDR_DBR, index, val)
#define ia64_set_ibr(index, val)	\
		__setIndReg(_IA64_REG_INDR_IBR, index, val)
#define ia64_set_pkr(index, val)	\
		__setIndReg(_IA64_REG_INDR_PKR, index, val)
#define ia64_set_pmc(index, val)	\
		__setIndReg(_IA64_REG_INDR_PMC, index, val)
#define ia64_set_pmd(index, val)	\
		__setIndReg(_IA64_REG_INDR_PMD, index, val)
#define ia64_set_rr(index, val)		\
		__setIndReg(_IA64_REG_INDR_RR, index, val)

#define ia64_get_cpuid(index)	__getIndReg(_IA64_REG_INDR_CPUID, index)
#define __ia64_get_dbr(index)	__getIndReg(_IA64_REG_INDR_DBR, index)
#define ia64_get_ibr(index)	__getIndReg(_IA64_REG_INDR_IBR, index)
#define ia64_get_pkr(index)	__getIndReg(_IA64_REG_INDR_PKR, index)
#define ia64_get_pmc(index)	__getIndReg(_IA64_REG_INDR_PMC, index)
#define ia64_get_pmd(index)	__getIndReg(_IA64_REG_INDR_PMD, index)
#define ia64_get_rr(index)	__getIndReg(_IA64_REG_INDR_RR, index)

#define ia64_srlz_d		__dsrlz
#define ia64_srlz_i		__isrlz

#define ia64_st1_rel		__st1_rel
#define ia64_st2_rel		__st2_rel
#define ia64_st4_rel		__st4_rel
#define ia64_st8_rel		__st8_rel

#define ia64_ld1_acq		__ld1_acq
#define ia64_ld2_acq		__ld2_acq
#define ia64_ld4_acq		__ld4_acq
#define ia64_ld8_acq		__ld8_acq

#define ia64_sync_i		__synci
#define ia64_thash		__thash
#define ia64_ttag		__ttag
#define ia64_itcd		__itcd
#define ia64_itci		__itci
#define ia64_itrd		__itrd
#define ia64_itri		__itri
#define ia64_ptce		__ptce
#define ia64_ptcl		__ptcl
#define ia64_ptcg		__ptcg
#define ia64_ptcga		__ptcga
#define ia64_ptri		__ptri
#define ia64_ptrd		__ptrd
#define ia64_dep_mi		_m64_dep_mi

/* Values for lfhint in __lfetch and __lfetch_fault */
#define ia64_lfhint_none	0
#define ia64_lfhint_nt1		1
#define ia64_lfhint_nt2		2
#define ia64_lfhint_nta		3

#define ia64_lfetch		__lfetch
#define ia64_lfetch_excl	__lfetch_excl
#define ia64_lfetch_fault	__lfetch_fault
#define ia64_lfetch_fault_excl	__lfetch_fault_excl

#define ia64_intrin_local_irq_restore(x)	\
do {						\
	if ((x) != 0) {				\
		ia64_ssm(IA64_PSR_I);		\
		ia64_srlz_d();			\
	} else {				\
		ia64_rsm(IA64_PSR_I);		\
	}					\
} while (0)

#endif /* _ASM_IA64_INTEL_INTRIN_H */
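Note: this header exists so kernel code written against the ia64_* names builds unchanged under the Intel compiler (ecc), which has no GCC-style inline asm: the declarations are the compiler's built-in intrinsics, and the defines map the kernel's spelling onto them. A sketch of a typical call site (illustrative only; IA64_PSR_I and _IA64_REG_PSR come from other kernel headers, asm/processor.h and asm/ia64regs.h, not this diff):

	/* Modeled on the kernel's local_irq_save/restore pair. */
	static unsigned long demo_irq_save(void)
	{
		unsigned long psr = ia64_getreg(_IA64_REG_PSR);	/* snapshot psr */
		ia64_stop();
		ia64_rsm(IA64_PSR_I);				/* mask interrupts */
		return psr & IA64_PSR_I;			/* previous interrupt state */
	}

	/* ... critical section ..., then:
	 *	ia64_intrin_local_irq_restore(flags);
	 * which re-enables (ssm + srlz.d) or keeps interrupts masked (rsm). */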
include/asm-ia64/spinlock.h

@@ -24,6 +24,7 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
 #define spin_lock_init(x)			((x)->lock = 0)
 
+#ifdef ASM_SUPPORTED
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
@@ -85,6 +86,21 @@ _raw_spin_lock (spinlock_t *lock)
 # endif /* CONFIG_MCKINLEY */
 #endif
 }
+#else /* !ASM_SUPPORTED */
+# define _raw_spin_lock(x)								\
+do {											\
+	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
+	__u64 ia64_spinlock_val;							\
+	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
+	if (unlikely(ia64_spinlock_val)) {						\
+		do {									\
+			while (*ia64_spinlock_ptr)					\
+				ia64_barrier();						\
+			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+		} while (ia64_spinlock_val);						\
+	}										\
+} while (0)
+#endif /* !ASM_SUPPORTED */
 
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
@@ -117,22 +133,19 @@ do {											\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);				\
 } while (0)
 
+#ifdef ASM_SUPPORTED
 #define _raw_write_lock(rw)							\
 do {										\
 	__asm__ __volatile__ (							\
 		"mov ar.ccv = r0\n"						\
-		"dep r29 = -1, r0, 31, 1\n"					\
-		";;\n"								\
+		"dep r29 = -1, r0, 31, 1;;\n"					\
 		"1:\n"								\
-		"ld4 r2 = [%0]\n"						\
-		";;\n"								\
+		"ld4 r2 = [%0];;\n"						\
 		"cmp4.eq p0,p7 = r0,r2\n"					\
 		"(p7) br.cond.spnt.few 1b \n"					\
-		"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"				\
-		";;\n"								\
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
 		"cmp4.eq p0,p7 = r0, r2\n"					\
-		"(p7) br.cond.spnt.few 1b\n"					\
-		";;\n"								\
+		"(p7) br.cond.spnt.few 1b;;\n"					\
 		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
 } while(0)
 
@@ -142,13 +155,35 @@ do {										\
 										\
 	__asm__ __volatile__ (							\
 		"mov ar.ccv = r0\n"						\
-		"dep r29 = -1, r0, 31, 1\n"					\
-		";;\n"								\
+		"dep r29 = -1, r0, 31, 1;;\n"					\
 		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
 		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
 	(result == 0);								\
 })
 
+#else /* !ASM_SUPPORTED */
+
+#define _raw_write_lock(l)								\
+({											\
+	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
+	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
+	do {										\
+		while (*ia64_write_lock_ptr)						\
+			ia64_barrier();							\
+		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
+	} while (ia64_val);								\
+})
+
+#define _raw_write_trylock(rw)							\
+({										\
+	__u64 ia64_val;								\
+	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);				\
+	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);		\
+	(ia64_val == 0);							\
+})
+
+#endif /* !ASM_SUPPORTED */
+
 #define _raw_write_unlock(x)								\
 ({											\
 	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
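Note: two things happen above. The ";;" stop bits in the asm are folded onto the preceding instruction lines (purely cosmetic), and every asm-based lock primitive gains a C fallback under !ASM_SUPPORTED built from ia64_cmpxchg4_acq() and ia64_dep_mi(), so the Intel compiler can build these paths. The fallback spin lock is a classic test-and-test-and-set; the same shape in portable C (illustrative; a GCC builtin stands in for ia64_cmpxchg4_acq):

	#include <stdint.h>

	static void demo_spin_lock(volatile uint32_t *lock)
	{
		/* fast path: try to swing 0 -> 1 with an atomic compare-exchange */
		while (__sync_val_compare_and_swap(lock, 0, 1) != 0) {
			/* contended: spin on cheap plain loads until the lock
			 * looks free, only then retry the expensive atomic */
			while (*lock)
				;	/* the kernel macro calls ia64_barrier() here */
		}
	}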
include/asm-ia64/uaccess.h

@@ -33,6 +33,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 
+#include <asm/intrinsics.h>
 #include <asm/pgtable.h>
 
 /*
@@ -86,6 +87,8 @@ verify_area (int type, const void *addr, unsigned long size)
 #define __put_user(x,ptr)	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
 #define __get_user(x,ptr)	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
 
+#ifdef ASM_SUPPORTED
+
 extern void __get_user_unknown (void);
 
 #define __get_user_nocheck(x,ptr,size)				\
@@ -217,6 +220,90 @@ extern void __put_user_unknown (void);
 	  "[1:]"							\
 	: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
 
+#else /* !ASM_SUPPORTED */
+
+#define RELOC_TYPE	2	/* ip-rel */
+
+#define __put_user_xx(val, addr, size, err)					\
+	__st_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE,		\
+		  (unsigned long) (val));					\
+	(err) = ia64_getreg(_IA64_REG_R8);
+
+#define __get_user_xx(val, addr, size, err)					\
+	__ld_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE);	\
+	(err) = ia64_getreg(_IA64_REG_R8);					\
+	(val) = ia64_getreg(_IA64_REG_R9);
+
+extern void __get_user_unknown (void);
+
+#define __get_user_nocheck(x, ptr, size)				\
+({									\
+	register long __gu_err = 0;					\
+	register long __gu_val = 0;					\
+	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
+	switch (size) {							\
+	      case 1: case 2: case 4: case 8:				\
+		__get_user_xx(__gu_val, __gu_addr, size, __gu_err);	\
+		break;							\
+	      default:							\
+		__get_user_unknown();					\
+		break;							\
+	}								\
+	(x) = (__typeof__(*(ptr))) __gu_val;				\
+	__gu_err;							\
+})
+
+#define __get_user_check(x,ptr,size,segment)					\
+({										\
+	register long __gu_err = -EFAULT;					\
+	register long __gu_val = 0;						\
+	const __typeof__(*(ptr)) *__gu_addr = (ptr);				\
+	if (__access_ok((long) __gu_addr, size, segment)) {			\
+		switch (size) {							\
+		      case 1: case 2: case 4: case 8:				\
+			__get_user_xx(__gu_val, __gu_addr, size, __gu_err);	\
+			break;							\
+		      default:							\
+			__get_user_unknown(); break;				\
+		}								\
+	}									\
+	(x) = (__typeof__(*(ptr))) __gu_val;					\
+	__gu_err;								\
+})
+
+extern void __put_user_unknown (void);
+
+#define __put_user_nocheck(x, ptr, size)			\
+({								\
+	int __pu_err = 0;					\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
+	switch (size) {						\
+	      case 1: case 2: case 4: case 8:			\
+		__put_user_xx(x, __pu_addr, size, __pu_err);	\
+		break;						\
+	      default:						\
+		__put_user_unknown(); break;			\
+	}							\
+	__pu_err;						\
+})
+
+#define __put_user_check(x,ptr,size,segment)				\
+({									\
+	register long __pu_err = -EFAULT;				\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	if (__access_ok((long) __pu_addr, size, segment)) {		\
+		switch (size) {						\
+		      case 1: case 2: case 4: case 8:			\
+			__put_user_xx(x, __pu_addr, size, __pu_err);	\
+			break;						\
+		      default:						\
+			__put_user_unknown(); break;			\
+		}							\
+	}								\
+	__pu_err;							\
+})
+
+#endif /* !ASM_SUPPORTED */
+
 /*
  * Complex access routines
 */
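Note: in the !ASM_SUPPORTED branch, the fault handling that the asm versions express with exception-table annotations is delegated to the __ld_user/__st_user intrinsics, which (per their tableName argument) tie each access to the "__ex_table" section and report status in r8, with a loaded value in r9; the macros then read those back with ia64_getreg(). From the caller's side nothing changes. A usage sketch (illustrative; the function name is mine):

	static long demo_read_int_from_user(int *dst, int *usrc)
	{
		/* expands to __get_user_nocheck(..., 4) on either compiler path */
		long err = __get_user(*dst, usrc);
		return err;	/* 0 on success, nonzero if the user access faulted */
	}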