Commit 06fca841 authored by Ilya Leoshkevich, committed by Andrii Nakryiko

selftests/bpf: Use __BYTE_ORDER__

Use the compiler-defined __BYTE_ORDER__ instead of the libc-defined
__BYTE_ORDER for consistency.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211026010831.748682-4-iii@linux.ibm.com
parent 3930198d
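
For context (not part of the commit itself): __BYTE_ORDER and __LITTLE_ENDIAN come from the libc's <endian.h>, whereas __BYTE_ORDER__, __ORDER_LITTLE_ENDIAN__, and __ORDER_BIG_ENDIAN__ are predefined by GCC and Clang, so the check works without including any header. A minimal sketch of the pattern the selftests switch to:

	/* Builds with gcc or clang on any libc; no <endian.h> needed,
	 * because these macros are predefined by the compiler. */
	#include <stdio.h>

	int main(void)
	{
	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		puts("little-endian");
	#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		puts("big-endian");
	#else
	#error "Unrecognized __BYTE_ORDER__"
	#endif
		return 0;
	}
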
@@ -7,12 +7,12 @@
 #include <bpf/btf.h>
 
 void test_btf_endian() {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	enum btf_endianness endian = BTF_LITTLE_ENDIAN;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	enum btf_endianness endian = BTF_BIG_ENDIAN;
 #else
-#error "Unrecognized __BYTE_ORDER"
+#error "Unrecognized __BYTE_ORDER__"
 #endif
 	enum btf_endianness swap_endian = 1 - endian;
 	struct btf *btf = NULL, *swap_btf = NULL;
...
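
The `swap_endian = 1 - endian` flip in the hunk above relies on libbpf defining the two enumerators as 0 and 1. A small self-contained sketch of that assumption (enum values as in tools/lib/bpf/btf.h at the time of this commit):

	#include <stdio.h>

	/* Mirrors libbpf's definition; these values are what make 1 - x work. */
	enum btf_endianness {
		BTF_LITTLE_ENDIAN = 0,
		BTF_BIG_ENDIAN = 1,
	};

	int main(void)
	{
		enum btf_endianness endian = BTF_LITTLE_ENDIAN;
		enum btf_endianness swap_endian = 1 - endian;

		printf("%d -> %d\n", endian, swap_endian); /* prints "0 -> 1" */
		return 0;
	}
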
@@ -124,7 +124,7 @@ static struct sysctl_test tests[] = {
 		.descr = "ctx:write sysctl:write read ok narrow",
 		.insns = {
 			/* u64 w = (u16)write & 1; */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 			BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
 				    offsetof(struct bpf_sysctl, write)),
 #else
@@ -184,7 +184,7 @@ static struct sysctl_test tests[] = {
 		.descr = "ctx:file_pos sysctl:read read ok narrow",
 		.insns = {
 			/* If (file_pos == X) */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
 				    offsetof(struct bpf_sysctl, file_pos)),
 #else
...
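
These sysctl hunks guard a narrow load: reading the low 16 bits of the 32-bit `write` field means offset +0 on little-endian but +2 on big-endian (the collapsed #else branches load from the endian-adjusted offset). A hedged user-space sketch of the same layout rule:

	/* Sketch (not from the commit): the low half of a 32-bit value sits
	 * at byte offset 0 on little-endian and at offset 2 on big-endian. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t write = 1;
		uint16_t low;

	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		memcpy(&low, (uint8_t *)&write + 0, sizeof(low));
	#else
		memcpy(&low, (uint8_t *)&write + 2, sizeof(low));
	#endif
		printf("low half = %u\n", low); /* prints 1 either way */
		return 0;
	}
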
@@ -502,7 +502,7 @@
 	"check skb->hash byte load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash)),
 #else
@@ -537,7 +537,7 @@
 	"check skb->hash byte load permitted 3",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -646,7 +646,7 @@
 	"check skb->hash half load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash)),
 #else
@@ -661,7 +661,7 @@
 	"check skb->hash half load permitted 2",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -676,7 +676,7 @@
 	"check skb->hash half load not permitted, unaligned 1",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 1),
 #else
@@ -693,7 +693,7 @@
 	"check skb->hash half load not permitted, unaligned 3",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -951,7 +951,7 @@
 	"check skb->data half load not permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, data)),
 #else
...
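
The "not permitted, unaligned" cases in the hunks above reflect the verifier's rule for narrow context loads: a smaller-than-field load is only accepted at offsets aligned to the load size. A hedged sketch (not the verifier's actual code) of which in-field offsets a 2-byte load of a 4-byte field can use:

	#include <stdbool.h>
	#include <stdio.h>

	/* For a half (2-byte) load from a 4-byte ctx field, only
	 * size-aligned offsets that stay within the field are accepted. */
	static bool half_load_ok(unsigned int off_in_field)
	{
		return off_in_field % 2 == 0 && off_in_field + 2 <= 4;
	}

	int main(void)
	{
		for (unsigned int off = 0; off < 4; off++)
			printf("+%u: %s\n", off,
			       half_load_ok(off) ? "permitted" : "rejected");
		return 0;
	}

This prints "permitted" for +0 and +2 and "rejected" for +1 and +3, matching the "unaligned 1" and "unaligned 3" tests.
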
@@ -174,7 +174,7 @@
 	"check skb->tc_classid half load not permitted for lwt prog",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, tc_classid)),
 #else
...
@@ -2,7 +2,7 @@
 	"check bpf_perf_event_data->sample_period byte load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -18,7 +18,7 @@
 	"check bpf_perf_event_data->sample_period half load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -34,7 +34,7 @@
 	"check bpf_perf_event_data->sample_period word load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
...
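
The perf_event hunks apply the same idea to the 64-bit sample_period field: byte, half, and word loads of the low-order bits land at +0 on little-endian but at +7, +6, and +4 respectively on big-endian (the collapsed #else branches). A small sketch (not from the commit) of those offsets:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t sample_period = 0x1122334455667788ULL;
		const uint8_t *p = (const uint8_t *)&sample_period;

	/* Offsets of the lowest-order byte/half/word within a u64. */
	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		unsigned int byte_off = 0, half_off = 0, word_off = 0;
	#else
		unsigned int byte_off = 7, half_off = 6, word_off = 4;
	#endif
		/* Low byte is 0x88 on either endianness. */
		printf("low byte 0x%02x at +%u\n", p[byte_off], byte_off);
		printf("low half at +%u, low word at +%u\n", half_off, word_off);
		return 0;
	}
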