Commit e02daf4c authored by Andrii Nakryiko

Merge branch 'core_reloc fixes for s390'

Ilya Leoshkevich says:

====================

v2: https://lore.kernel.org/bpf/20211025131214.731972-1-iii@linux.ibm.com/
v2 -> v3: Split the fix from the cleanup (Daniel).

v1: https://lore.kernel.org/bpf/20211021234653.643302-1-iii@linux.ibm.com/
v1 -> v2: Drop bpf_core_calc_field_relo() restructuring, split the
          __BYTE_ORDER__ change (Andrii).

Hi,

this series fixes test failures in core_reloc on s390.

Patch 1 fixes an endianness bug with __BYTE_ORDER vs __BYTE_ORDER__.
Patches 2-5 make the rest of the code consistent in that respect.
Patch 6 fixes an endianness issue in test_core_reloc_mods.

Best regards,
Ilya
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 124c6003 2e2c6d3f
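
Why patch 1 matters: __BYTE_ORDER and __LITTLE_ENDIAN are glibc macros that only exist once <endian.h> has been included, while __BYTE_ORDER__ and __ORDER_LITTLE_ENDIAN__/__ORDER_BIG_ENDIAN__ are compiler built-ins that are always defined. If <endian.h> never gets pulled in, the preprocessor expands both unknown identifiers to 0, so "#if __BYTE_ORDER == __LITTLE_ENDIAN" becomes "#if 0 == 0" and the little-endian branch is silently taken even on big-endian s390. A minimal standalone sketch of the failure mode (mine, not part of the series):

/* Do the broken check before any #include so <endian.h> cannot sneak in:
 * both identifiers are unknown here and expand to 0.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN   /* 0 == 0: always true */
#define GUESSED "little"
#else
#define GUESSED "big"
#endif

#include <stdio.h>

int main(void)
{
	unsigned int probe = 1;

	printf("compile-time guess: %s-endian\n", GUESSED);
	printf("actual host:        %s-endian\n",
	       *(unsigned char *)&probe == 1 ? "little" : "big");
	return 0;	/* on s390 the two lines disagree */
}

Switching every such check to the always-defined __BYTE_ORDER__, as the hunks below do, removes the header dependency entirely.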
@@ -62,9 +62,9 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 #define EXPAND(...) __VA_ARGS__
 /* Ensure that we load the logically correct offset. */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 #else
 #error "Unknown endianness"
@@ -85,10 +85,10 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 #elif __BITS_PER_LONG == 64
 /* Ensure that we load the logically correct offset. */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define ENDIAN(_lo, _hi) _lo, _hi
 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define ENDIAN(_lo, _hi) _hi, _lo
 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 #endif
...
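
The two hunks above encode one fact: seccomp_data.args[] entries are __u64, but classic BPF loads 32 bits at a time, so the logically low word sits at the start of the u64 on little-endian and four bytes in on big-endian (and vice versa for the high word). A standalone sketch of that layout (mine, with memcpy standing in for the BPF load):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long arg = 0x1122334455667788ULL;
	unsigned int lo;
	unsigned int probe = 1;

	/* Offset of the low 32 bits within the u64, per host endianness. */
	size_t lo_off = (*(unsigned char *)&probe == 1) ? 0 : 4;

	memcpy(&lo, (unsigned char *)&arg + lo_off, sizeof(lo));
	printf("low word at offset +%zu: 0x%x\n", lo_off, lo); /* 0x55667788 */
	return 0;
}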
@@ -40,7 +40,7 @@ enum bpf_enum_value_kind {
 #define __CORE_RELO(src, field, info) \
 	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
 	bpf_probe_read_kernel( \
 		(void *)dst, \
...
@@ -538,9 +538,9 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
 static bool is_host_big_endian(void)
 {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	return false;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	return true;
 #else
 # error "Unrecognized __BYTE_ORDER__"
...
@@ -1576,11 +1576,11 @@ static int btf_dump_get_bitfield_value(struct btf_dump *d,
 	/* Bitfield value retrieval is done in two steps; first relevant bytes are
 	 * stored in num, then we left/right shift num to eliminate irrelevant bits.
 	 */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	for (i = t->size - 1; i >= 0; i--)
 		num = num * 256 + bytes[i];
 	nr_copy_bits = bit_sz + bits_offset;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	for (i = 0; i < t->size; i++)
 		num = num * 256 + bytes[i];
 	nr_copy_bits = t->size * 8 - bits_offset;
@@ -1700,10 +1700,10 @@ static int btf_dump_int_data(struct btf_dump *d,
 		/* avoid use of __int128 as some 32-bit platforms do not
 		 * support it.
 		 */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 		lsi = ints[0];
 		msi = ints[1];
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 		lsi = ints[1];
 		msi = ints[0];
 #else
...
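
For the bitfield loops above: each loop folds the storage unit's bytes into num starting with the most significant byte, so num ends up holding the unit as an ordinary integer; a left/right shift pair then isolates the field. A worked standalone example (mine), using a 2-byte unit holding 0x1234 and a 4-bit field at bit offset 8:

#include <stdio.h>

int main(void)
{
	/* 2-byte little-endian storage unit holding 0x1234 */
	unsigned char bytes[2] = { 0x34, 0x12 };
	unsigned long long num = 0;
	int bit_sz = 4, bits_offset = 8, nr_copy_bits;
	int i;

	for (i = 2 - 1; i >= 0; i--)		/* little-endian direction */
		num = num * 256 + bytes[i];	/* num becomes 0x1234 */
	nr_copy_bits = bit_sz + bits_offset;	/* 12 */

	/* Left-shift drops the bits above the field, right-shift drops
	 * the bits below it.
	 */
	printf("field = 0x%llx\n",
	       (num << (64 - nr_copy_bits)) >> (64 - bit_sz));	/* 0x2 */
	return 0;
}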
@@ -1299,10 +1299,10 @@ static int bpf_object__elf_init(struct bpf_object *obj)
 static int bpf_object__check_endianness(struct bpf_object *obj)
 {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
 		return 0;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
 		return 0;
 #else
...
@@ -323,12 +323,12 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
 	linker->elf_hdr->e_machine = EM_BPF;
 	linker->elf_hdr->e_type = ET_REL;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
 #else
-#error "Unknown __BYTE_ORDER"
+#error "Unknown __BYTE_ORDER__"
 #endif
 	/* STRTAB */
@@ -538,12 +538,12 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
 			       const struct bpf_linker_file_opts *opts,
 			       struct src_obj *obj)
 {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	const int host_endianness = ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	const int host_endianness = ELFDATA2MSB;
 #else
-#error "Unknown __BYTE_ORDER"
+#error "Unknown __BYTE_ORDER__"
 #endif
 	int err = 0;
 	Elf_Scn *scn;
...
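
Both linker hunks, like bpf_object__check_endianness above, revolve around e_ident[EI_DATA]: the one byte in the ELF identification array by which a file declares its byte order. A small sketch (mine) of the host-side constant being matched:

#include <elf.h>
#include <stdio.h>

int main(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	const int host_endianness = ELFDATA2LSB;
#else
	const int host_endianness = ELFDATA2MSB;
#endif
	/* An object file is only loadable here if its e_ident[EI_DATA]
	 * byte equals this value.
	 */
	printf("host expects e_ident[EI_DATA] == %d (%s)\n",
	       host_endianness,
	       host_endianness == ELFDATA2LSB ? "ELFDATA2LSB" : "ELFDATA2MSB");
	return 0;
}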
@@ -662,7 +662,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
 			*validate = true; /* signedness is never ambiguous */
 		break;
 	case BPF_FIELD_LSHIFT_U64:
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 		*val = 64 - (bit_off + bit_sz - byte_off * 8);
 #else
 		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
...
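
A worked example for the formula (mine): with a little-endian 8-byte load at byte_off = 0 and a field at bit_off = 4 of width bit_sz = 4, the left shift is 64 - (4 + 4 - 0) = 56, and the companion BPF_FIELD_RSHIFT_U64 value is 64 - bit_sz = 60; applying the pair right-aligns the field:

#include <stdio.h>

int main(void)
{
	unsigned long long loaded = 0x1234;	/* value after the 8-byte load */
	unsigned int bit_off = 4, bit_sz = 4, byte_off = 0;

	unsigned int lshift = 64 - (bit_off + bit_sz - byte_off * 8);	/* 56 */
	unsigned int rshift = 64 - bit_sz;				/* 60 */

	/* Shift out everything above the field, then everything below it. */
	printf("field = 0x%llx\n", (loaded << lshift) >> rshift);	/* 0x3 */
	return 0;
}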
@@ -7,12 +7,12 @@
 #include <bpf/btf.h>
 void test_btf_endian() {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	enum btf_endianness endian = BTF_LITTLE_ENDIAN;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	enum btf_endianness endian = BTF_BIG_ENDIAN;
 #else
-#error "Unrecognized __BYTE_ORDER"
+#error "Unrecognized __BYTE_ORDER__"
 #endif
 	enum btf_endianness swap_endian = 1 - endian;
 	struct btf *btf = NULL, *swap_btf = NULL;
...
@@ -42,7 +42,16 @@ struct core_reloc_mods {
 	core_reloc_mods_substruct_t h;
 };
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
+#else
+#define CORE_READ(dst, src) ({ \
+	int __sz = sizeof(*(dst)) < sizeof(*(src)) ? sizeof(*(dst)) : \
+		   sizeof(*(src)); \
+	bpf_core_read((char *)(dst) + sizeof(*(dst)) - __sz, __sz, \
+		      (const char *)(src) + sizeof(*(src)) - __sz); \
+})
+#endif
 SEC("raw_tracepoint/sys_enter")
 int test_core_mods(void *ctx)
...
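
This hunk is the test fix itself (patch 6). CORE_READ in this test copies into destinations whose size may differ from the source field's, and on big-endian a head-aligned copy of sizeof(*dst) bytes grabs the most significant bytes, i.e. garbage for a narrowing read. The new macro therefore copies min(sizeof(*dst), sizeof(*src)) bytes from the tail of both objects. A userspace sketch of the difference (mine, with memcpy standing in for bpf_core_read):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long src = 0x11223344u;	/* value fits in 32 bits */
	unsigned int dst;
	int sz = sizeof(dst) < sizeof(src) ? (int)sizeof(dst) : (int)sizeof(src);

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Head-aligned copy: the low bytes come first on little-endian. */
	memcpy(&dst, &src, sz);
#else
	/* Tail-aligned copy, as in the big-endian CORE_READ above: the
	 * low bytes of the u64 are its LAST four bytes.
	 */
	memcpy((char *)&dst + sizeof(dst) - sz,
	       (const char *)&src + sizeof(src) - sz, sz);
#endif
	printf("dst = 0x%x\n", dst);	/* 0x11223344 either way */
	return 0;
}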
@@ -124,7 +124,7 @@ static struct sysctl_test tests[] = {
 		.descr = "ctx:write sysctl:write read ok narrow",
 		.insns = {
 			/* u64 w = (u16)write & 1; */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 			BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1,
 				    offsetof(struct bpf_sysctl, write)),
 #else
@@ -184,7 +184,7 @@ static struct sysctl_test tests[] = {
 		.descr = "ctx:file_pos sysctl:read read ok narrow",
 		.insns = {
 			/* If (file_pos == X) */
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
 				    offsetof(struct bpf_sysctl, file_pos)),
 #else
...
@@ -502,7 +502,7 @@
 	"check skb->hash byte load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash)),
 #else
@@ -537,7 +537,7 @@
 	"check skb->hash byte load permitted 3",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -646,7 +646,7 @@
 	"check skb->hash half load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash)),
 #else
@@ -661,7 +661,7 @@
 	"check skb->hash half load permitted 2",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -676,7 +676,7 @@
 	"check skb->hash half load not permitted, unaligned 1",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 1),
 #else
@@ -693,7 +693,7 @@
 	"check skb->hash half load not permitted, unaligned 3",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -951,7 +951,7 @@
 	"check skb->data half load not permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, data)),
 #else
...
@@ -174,7 +174,7 @@
 	"check skb->tc_classid half load not permitted for lwt prog",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct __sk_buff, tc_classid)),
 #else
...
@@ -2,7 +2,7 @@
 	"check bpf_perf_event_data->sample_period byte load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -18,7 +18,7 @@
 	"check bpf_perf_event_data->sample_period half load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -34,7 +34,7 @@
 	"check bpf_perf_event_data->sample_period word load permitted",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 		    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
...
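
The sysctl, verifier, and perf-event hunks above all follow one pattern: a narrow 1-, 2- or 4-byte load out of a wider context field only finds the interesting low bytes at offset +0 on little-endian; on big-endian the load has to aim at the tail of the field, which is presumably what the elided #else branches adjust for. A standalone sketch of the principle (mine):

#include <stdio.h>

int main(void)
{
	unsigned long long sample_period = 0x42;	/* low byte is 0x42 */
	unsigned char *p = (unsigned char *)&sample_period;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	size_t off = 0;
#else
	size_t off = sizeof(sample_period) - 1;		/* +7 */
#endif
	/* A byte-wide load of the low byte lands at a different offset
	 * depending on byte order.
	 */
	printf("byte-wide load at +%zu: 0x%x\n", off, p[off]);	/* 0x42 */
	return 0;
}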
@@ -276,12 +276,12 @@ int seccomp(unsigned int op, unsigned int flags, void *args)
 }
 #endif
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
 #else
-#error "wut? Unknown __BYTE_ORDER?!"
+#error "wut? Unknown __BYTE_ORDER__?!"
 #endif
 #define SIBLING_EXIT_UNKILLED 0xbadbeef
...