Commit 9873ce2e authored by Ilya Leoshkevich, committed by Alexei Starovoitov

selftests/bpf: Add big-endian support to the ldsx test

Prepare the ldsx test to run on big-endian systems by adding the
necessary endianness checks around narrow memory accesses.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/r/20230919101336.2223655-4-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 6cb66eca
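
Editor's context, not part of the commit: on a big-endian machine the least significant byte of a value sits at the highest address, so a narrow load meant to observe the low bytes of a wider stored value must be shifted by the width difference. That is what the "+ sizeof(skb->mark) - 1" operand adjustment and the "r10 - 1" / "r10 - 2" / "r10 - 4" stack offsets in the diff below encode. A minimal userspace sketch of the same idea (hypothetical program, using the compiler-predefined __BYTE_ORDER__ macros the patch itself relies on):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t v = 0x3fe;     /* the constant the "LDSX, S8" test stores */
            int8_t b;

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            b = *(int8_t *)&v;                      /* LSB at offset 0 */
    #else
            b = *((int8_t *)&v + sizeof(v) - 1);    /* LSB at offset 7 */
    #endif
            printf("%d\n", b);      /* 0xfe sign-extends to -2 either way */
            return 0;
    }

With the offset adjusted per byte order, the narrow load reads the same byte on both kinds of systems, which is why the tests' expected return values stay unchanged.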
--- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -104,7 +104,11 @@ int _tc(volatile struct __sk_buff *skb)
 		      "%[tmp_mark] = r1"
 		      : [tmp_mark]"=r"(tmp_mark)
 		      : [ctx]"r"(skb),
-			[off_mark]"i"(offsetof(struct __sk_buff, mark))
+			[off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+				      + sizeof(skb->mark) - 1
+#endif
+				      )
 		      : "r1");
 #else
 	tmp_mark = (char)skb->mark;
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -13,12 +13,16 @@ __description("LDSX, S8")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s8(void)
 {
-	asm volatile ("					\
-	r1 = 0x3fe;					\
-	*(u64 *)(r10 - 8) = r1;				\
-	r0 = *(s8 *)(r10 - 8);				\
-	exit;						\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0x3fe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s8 *)(r10 - 8);"
+#else
+	"r0 = *(s8 *)(r10 - 1);"
+#endif
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -26,12 +30,16 @@ __description("LDSX, S16")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s16(void)
 {
-	asm volatile ("					\
-	r1 = 0x3fffe;					\
-	*(u64 *)(r10 - 8) = r1;				\
-	r0 = *(s16 *)(r10 - 8);				\
-	exit;						\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0x3fffe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s16 *)(r10 - 8);"
+#else
+	"r0 = *(s16 *)(r10 - 2);"
+#endif
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -39,13 +47,17 @@ __description("LDSX, S32")
 __success __success_unpriv __retval(-1)
 __naked void ldsx_s32(void)
 {
-	asm volatile ("					\
-	r1 = 0xfffffffe;				\
-	*(u64 *)(r10 - 8) = r1;				\
-	r0 = *(s32 *)(r10 - 8);				\
-	r0 >>= 1;					\
-	exit;						\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0xfffffffe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s32 *)(r10 - 8);"
+#else
+	"r0 = *(s32 *)(r10 - 4);"
+#endif
+	"r0 >>= 1;"
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -54,20 +66,24 @@ __log_level(2) __success __retval(1)
 __msg("R1_w=scalar(smin=-128,smax=127)")
 __naked void ldsx_s8_range_priv(void)
 {
-	asm volatile ("					\
-	call %[bpf_get_prandom_u32];			\
-	*(u64 *)(r10 - 8) = r0;				\
-	r1 = *(s8 *)(r10 - 8);				\
-	/* r1 with s8 range */				\
-	if r1 s> 0x7f goto l0_%=;			\
-	if r1 s< -0x80 goto l0_%=;			\
-	r0 = 1;						\
-l1_%=:							\
-	exit;						\
-l0_%=:							\
-	r0 = 2;						\
-	goto l1_%=;					\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s8 *)(r10 - 8);"
+#else
+	"r1 = *(s8 *)(r10 - 1);"
+#endif
+	/* r1 with s8 range */
+	"if r1 s> 0x7f goto l0_%=;"
+	"if r1 s< -0x80 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -77,20 +93,24 @@ __description("LDSX, S16 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s16_range(void)
 {
-	asm volatile ("					\
-	call %[bpf_get_prandom_u32];			\
-	*(u64 *)(r10 - 8) = r0;				\
-	r1 = *(s16 *)(r10 - 8);				\
-	/* r1 with s16 range */				\
-	if r1 s> 0x7fff goto l0_%=;			\
-	if r1 s< -0x8000 goto l0_%=;			\
-	r0 = 1;						\
-l1_%=:							\
-	exit;						\
-l0_%=:							\
-	r0 = 2;						\
-	goto l1_%=;					\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s16 *)(r10 - 8);"
+#else
+	"r1 = *(s16 *)(r10 - 2);"
+#endif
+	/* r1 with s16 range */
+	"if r1 s> 0x7fff goto l0_%=;"
+	"if r1 s< -0x8000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -100,20 +120,24 @@ __description("LDSX, S32 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s32_range(void)
 {
-	asm volatile ("					\
-	call %[bpf_get_prandom_u32];			\
-	*(u64 *)(r10 - 8) = r0;				\
-	r1 = *(s32 *)(r10 - 8);				\
-	/* r1 with s16 range */				\
-	if r1 s> 0x7fffFFFF goto l0_%=;			\
-	if r1 s< -0x80000000 goto l0_%=;		\
-	r0 = 1;						\
-l1_%=:							\
-	exit;						\
-l0_%=:							\
-	r0 = 2;						\
-	goto l1_%=;					\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s32 *)(r10 - 8);"
+#else
+	"r1 = *(s32 *)(r10 - 4);"
+#endif
+	/* r1 with s16 range */
+	"if r1 s> 0x7fffFFFF goto l0_%=;"
+	"if r1 s< -0x80000000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
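
As a sanity check on the tests' expected __retval values (an editor's note, not from the commit), the sign-extending narrow loads reduce to plain C casts on the usual two's-complement targets; a hypothetical check program:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* "LDSX, S8": low byte of 0x3fe is 0xfe -> sign-extends to -2 */
            printf("%d\n", (int8_t)0x3fe);                  /* -2 */
            /* "LDSX, S16": low half-word of 0x3fffe is 0xfffe -> -2 */
            printf("%d\n", (int16_t)0x3fffe);               /* -2 */
            /* "LDSX, S32": 0xfffffffe is -2; arithmetic >> 1 gives -1 */
            printf("%d\n", (int32_t)0xfffffffe >> 1);       /* -1 */
            return 0;
    }

Because the values are fully determined by the low bytes of the stored constants, adjusting only the load offsets keeps the expected results identical across byte orders.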