Commit 5a31cc72 authored by Linus Torvalds

Merge tag 'linux-kselftest-next-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull Kselftest updates from Shuah Khan:
 "A mix of fixes, enhancements, and new tests. Bulk of the changes
  enhance and fix rseq and resctrl tests.

  In addition, user_events, dmabuf-heaps and perf_events are added to
  default kselftest build and test coverage. A futex test fix, enhance
  prctl test coverage, and minor fixes are included in this update"

* tag 'linux-kselftest-next-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (32 commits)
  selftests: cachestat: use proper syscall number macro
  selftests: cachestat: properly link in librt
  selftests/futex: Order calls to futex_lock_pi
  selftests: Hook more tests into the build infrastructure
  selftests/user_events: Reenable build
  selftests/filesystems: Add six consecutive 'x' characters to mktemp
  selftests/rseq: Use rseq_unqual_scalar_typeof in macros
  selftests/rseq: Fix arm64 buggy load-acquire/store-release macros
  selftests/rseq: Implement rseq_unqual_scalar_typeof
  selftests/rseq: Fix CID_ID typo in Makefile
  selftests:prctl: add set-process-name to .gitignore
  selftests:prctl: Fix make clean override warning
  selftests/resctrl: Remove test type checks from cat_val()
  selftests/resctrl: Pass the real number of tests to show_cache_info()
  selftests/resctrl: Move CAT/CMT test global vars to function they are used in
  selftests/resctrl: Don't use variable argument list for ->setup()
  selftests/resctrl: Don't pass test name to fill_buf
  selftests/resctrl: Improve parameter consistency in fill_buf
  selftests/resctrl: Remove unnecessary startptr global from fill_buf
  selftests/resctrl: Remove "malloc_and_init_memory" param from run_fill_buf()
  ...
parents 36534782 9b1db732
......@@ -12,6 +12,7 @@ TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
TARGETS += damon
TARGETS += dmabuf-heaps
TARGETS += drivers/dma-buf
TARGETS += drivers/s390x/uvdevice
TARGETS += drivers/net/bonding
......@@ -57,6 +58,7 @@ TARGETS += net/mptcp
TARGETS += net/openvswitch
TARGETS += netfilter
TARGETS += nsfs
TARGETS += perf_events
TARGETS += pidfd
TARGETS += pid_namespace
TARGETS += powerpc
......@@ -89,7 +91,9 @@ endif
TARGETS += tmpfs
TARGETS += tpm2
TARGETS += tty
TARGETS += uevents
TARGETS += user
TARGETS += user_events
TARGETS += vDSO
TARGETS += mm
TARGETS += x86
......
......@@ -3,6 +3,6 @@ TEST_GEN_PROGS := test_cachestat
CFLAGS += $(KHDR_INCLUDES)
CFLAGS += -Wall
CFLAGS += -lrt
LDLIBS += -lrt
include ../lib.mk
......@@ -23,7 +23,6 @@ static const char * const dev_files[] = {
"/dev/zero", "/dev/null", "/dev/urandom",
"/proc/version", "/proc"
};
static const int cachestat_nr = 451;
void print_cachestat(struct cachestat *cs)
{
......@@ -144,7 +143,7 @@ static int test_cachestat(const char *filename, bool write_random, bool create,
}
}
syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call returned %ld\n", syscall_ret);
......@@ -172,7 +171,7 @@ static int test_cachestat(const char *filename, bool write_random, bool create,
ksft_print_msg("fsync fails.\n");
ret = KSFT_FAIL;
} else {
syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call (after fsync) returned %ld\n",
syscall_ret);
......@@ -233,7 +232,7 @@ bool test_cachestat_shmem(void)
goto close_fd;
}
syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
if (syscall_ret) {
ksft_print_msg("Cachestat returned non-zero.\n");
......
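The cachestat changes above replace the hardcoded syscall number 451 with the __NR_cachestat macro and move -lrt from CFLAGS to LDLIBS so librt is actually passed at link time. A minimal standalone sketch of the same raw-syscall invocation is shown below; it is not part of the selftest and assumes the installed kernel headers provide __NR_cachestat and the struct cachestat / struct cachestat_range layouts from <linux/mman.h>, which is the case from v6.5 onwards.

/* Minimal sketch: query page-cache residency of a file via cachestat(2). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat, struct cachestat_range */

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* len 0 = to EOF */
	struct cachestat cs;
	long ret;
	int fd;

	fd = open(argc > 1 ? argv[1] : "/proc/version", O_RDONLY);
	if (fd < 0)
		return 1;

	ret = syscall(__NR_cachestat, fd, &range, &cs, 0);	/* flags must be 0 */
	if (!ret)
		printf("cached: %llu dirty: %llu writeback: %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback);

	close(fd);
	return ret ? 1 : 0;
}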
......@@ -12,7 +12,7 @@ set -u
set -o pipefail
BASE_DIR="$(dirname $0)"
TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXX)"
TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXXXX)"
IMG_PATH="${TMP_DIR}/fat.img"
MNT_PATH="${TMP_DIR}/mnt"
......
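The mktemp change above extends the template from four to six trailing 'X' characters. Several mktemp implementations (notably BusyBox's) and the underlying libc mkstemp()/mkdtemp() functions require at least six; a short illustration of the libc behaviour, not part of the selftest, follows.

/* Sketch: mkdtemp() rejects a template with fewer than six trailing X's. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char bad[]  = "/tmp/fat_tests_tmp.XXXX";	/* too short: fails with EINVAL */
	char good[] = "/tmp/fat_tests_tmp.XXXXXX";	/* six X's: accepted */

	if (!mkdtemp(bad))
		perror("mkdtemp(....XXXX)");

	if (mkdtemp(good))
		printf("created %s\n", good);

	return 0;
}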
......@@ -24,6 +24,7 @@
static long timeout_ns = 100000; /* 100us default timeout */
static futex_t futex_pi;
static pthread_barrier_t barrier;
void usage(char *prog)
{
......@@ -48,6 +49,8 @@ void *get_pi_lock(void *arg)
if (ret != 0)
error("futex_lock_pi failed\n", ret);
pthread_barrier_wait(&barrier);
/* Blocks forever */
ret = futex_wait(&lock, 0, NULL, 0);
error("futex_wait failed\n", ret);
......@@ -130,6 +133,7 @@ int main(int argc, char *argv[])
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
pthread_barrier_init(&barrier, NULL, 2);
pthread_create(&thread, NULL, get_pi_lock, NULL);
/* initialize relative timeout */
......@@ -163,6 +167,9 @@ int main(int argc, char *argv[])
res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
/* Wait until the other thread calls futex_lock_pi() */
pthread_barrier_wait(&barrier);
pthread_barrier_destroy(&barrier);
/*
* FUTEX_LOCK_PI with CLOCK_REALTIME
* Due to historical reasons, FUTEX_LOCK_PI supports only realtime
......
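The barrier added above guarantees that get_pi_lock() has actually taken futex_pi before the main thread runs its FUTEX_LOCK_PI timeout checks, so the lock attempt reliably contends instead of racing with thread creation. The ordering idiom itself, reduced to plain pthreads with illustrative names (not taken from the selftest), looks like this:

/* Build with: cc -pthread barrier_order.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_barrier_t barrier;
static int resource_held;

static void *worker(void *arg)
{
	resource_held = 1;		/* stands in for futex_lock_pi() */
	pthread_barrier_wait(&barrier);	/* publish: the lock is now held */
	pause();			/* block forever, like the test thread */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_barrier_init(&barrier, NULL, 2);
	pthread_create(&t, NULL, worker, NULL);

	pthread_barrier_wait(&barrier);	/* don't proceed until the worker holds it */
	pthread_barrier_destroy(&barrier);

	printf("resource_held=%d (always 1 here)\n", resource_held);
	return 0;
}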
......@@ -3,3 +3,4 @@ disable-tsc-ctxt-sw-stress-test
disable-tsc-on-off-stress-test
disable-tsc-test
set-anon-vma-name-test
set-process-name
......@@ -5,12 +5,10 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
TEST_PROGS := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test \
disable-tsc-test set-anon-vma-name-test
disable-tsc-test set-anon-vma-name-test set-process-name
all: $(TEST_PROGS)
include ../lib.mk
clean:
rm -fr $(TEST_PROGS)
endif
endif
// SPDX-License-Identifier: GPL-2.0
/*
* This test covers the PR_SET_NAME functionality of prctl calls
*/
#include <errno.h>
#include <sys/prctl.h>
#include <string.h>
#include "../kselftest_harness.h"
#define CHANGE_NAME "changename"
#define EMPTY_NAME ""
#define TASK_COMM_LEN 16
int set_name(char *name)
{
int res;
res = prctl(PR_SET_NAME, name, NULL, NULL, NULL);
if (res < 0)
return -errno;
return res;
}
int check_is_name_correct(char *check_name)
{
char name[TASK_COMM_LEN];
int res;
res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
if (res < 0)
return -errno;
return !strcmp(name, check_name);
}
int check_null_pointer(char *check_name)
{
char *name = NULL;
int res;
res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
return res;
}
TEST(rename_process) {
EXPECT_GE(set_name(CHANGE_NAME), 0);
EXPECT_TRUE(check_is_name_correct(CHANGE_NAME));
EXPECT_GE(set_name(EMPTY_NAME), 0);
EXPECT_TRUE(check_is_name_correct(EMPTY_NAME));
EXPECT_GE(set_name(CHANGE_NAME), 0);
EXPECT_LT(check_null_pointer(CHANGE_NAME), 0);
}
TEST_HARNESS_MAIN
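The new set-process-name test exercises PR_SET_NAME/PR_GET_NAME through the kselftest harness. As a point of reference (not part of the test), the name set this way is the calling thread's comm value, silently truncated to TASK_COMM_LEN - 1 (15) bytes, and is also visible through /proc/self/comm:

/* Sketch: cross-check PR_SET_NAME against /proc/self/comm. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char comm[32] = {0};
	FILE *fp;

	prctl(PR_SET_NAME, "a-rather-long-test-name", 0, 0, 0);

	fp = fopen("/proc/self/comm", "r");
	if (fp && fgets(comm, sizeof(comm), fp))
		printf("comm is now: %s", comm);	/* at most 15 chars plus '\n' */
	if (fp)
		fclose(fp);

	prctl(PR_GET_NAME, comm, 0, 0, 0);	/* reads the same value back */
	printf("PR_GET_NAME: %s\n", comm);
	return 0;
}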
......@@ -7,4 +7,4 @@ TEST_GEN_PROGS := resctrl_tests
include ../lib.mk
$(OUTPUT)/resctrl_tests: $(wildcard *.c)
$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
......@@ -87,21 +87,19 @@ static int reset_enable_llc_perf(pid_t pid, int cpu_no)
static int get_llc_perf(unsigned long *llc_perf_miss)
{
__u64 total_misses;
int ret;
/* Stop counters after one span to get miss rate */
ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
if (ret == -1) {
perror("Could not get llc misses through perf");
return -1;
}
total_misses = rf_cqm.values[0].value;
close(fd_lm);
*llc_perf_miss = total_misses;
return 0;
......@@ -212,7 +210,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
*/
int cat_val(struct resctrl_val_param *param)
{
int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0;
int memflush = 1, operation = 0, ret = 0;
char *resctrl_val = param->resctrl_val;
pid_t bm_pid;
......@@ -232,40 +230,38 @@ int cat_val(struct resctrl_val_param *param)
if (ret)
return ret;
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
initialize_llc_perf();
initialize_llc_perf();
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
ret = param->setup(1, param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
}
if (ret < 0)
break;
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
if (run_fill_buf(param->span, malloc_and_init_memory,
memflush, operation, resctrl_val)) {
fprintf(stderr, "Error-running fill buffer\n");
ret = -1;
break;
}
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
break;
} else {
ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
}
if (ret < 0)
break;
ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
if (ret)
break;
if (run_fill_buf(param->span, memflush, operation, true)) {
fprintf(stderr, "Error-running fill buffer\n");
ret = -1;
goto pe_close;
}
sleep(1);
ret = measure_cache_vals(param, bm_pid);
if (ret)
goto pe_close;
}
return ret;
pe_close:
close(fd_lm);
return ret;
}
/*
......@@ -282,7 +278,7 @@ int cat_val(struct resctrl_val_param *param)
* Return: 0 on success. non-zero on failure.
*/
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
unsigned long cache_span, unsigned long max_diff,
size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt)
{
......@@ -291,7 +287,7 @@ int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
long avg_diff = 0;
int ret;
avg_llc_val = sum_llc_val / (num_of_runs - 1);
avg_llc_val = sum_llc_val / num_of_runs;
avg_diff = (long)abs(cache_span - avg_llc_val);
diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
......@@ -304,7 +300,7 @@ int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
ksft_print_msg("Number of bits: %d\n", no_of_bits);
ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
ksft_print_msg("Cache span (%s): %lu\n", cmt ? "bytes" : "lines",
ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines",
cache_span);
return ret;
......
......@@ -17,27 +17,16 @@
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
static int count_of_bits;
static char cbm_mask[256];
static unsigned long long_mask;
static unsigned long cache_size;
/*
* Change schemata. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
* Run 5 times in order to get average values.
*/
static int cat_setup(int num, ...)
static int cat_setup(struct resctrl_val_param *p)
{
struct resctrl_val_param *p;
char schemata[64];
va_list param;
int ret = 0;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
......@@ -88,7 +77,7 @@ static int check_results(struct resctrl_val_param *param)
no_of_bits = count_bits(param->mask);
return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
get_vendor() == ARCH_INTEL, false);
}
......@@ -102,14 +91,12 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
{
unsigned long l_mask, l_mask_1;
int ret, pipefd[2], sibling_cpu_no;
unsigned long cache_size = 0;
unsigned long long_mask;
char cbm_mask[256];
int count_of_bits;
char pipe_message;
cache_size = 0;
ret = remount_resctrlfs(true);
if (ret)
return ret;
/* Get default cbm mask for L3/L2 cache */
ret = get_cbm_mask(cache_type, cbm_mask);
if (ret)
......@@ -144,7 +131,6 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
struct resctrl_val_param param = {
.resctrl_val = CAT_STR,
.cpu_no = cpu_no,
.mum_resctrlfs = false,
.setup = cat_setup,
};
......@@ -227,8 +213,6 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
out:
cat_test_cleanup();
if (bm_pid)
umount_resctrlfs();
return ret;
}
......@@ -16,20 +16,8 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
static int count_of_bits;
static char cbm_mask[256];
static unsigned long long_mask;
static unsigned long cache_size;
static int cmt_setup(int num, ...)
static int cmt_setup(struct resctrl_val_param *p)
{
struct resctrl_val_param *p;
va_list param;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
......@@ -71,7 +59,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
fclose(fp);
return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
true, true);
}
......@@ -82,14 +70,12 @@ void cmt_test_cleanup(void)
int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
{
unsigned long cache_size = 0;
unsigned long long_mask;
char cbm_mask[256];
int count_of_bits;
int ret;
cache_size = 0;
ret = remount_resctrlfs(true);
if (ret)
return ret;
if (!validate_resctrl_feature_request(CMT_STR))
return -1;
......@@ -117,7 +103,6 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.mum_resctrlfs = false,
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.span = cache_size * n / count_of_bits,
......@@ -126,7 +111,7 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
};
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
sprintf(benchmark_cmd[1], "%lu", param.span);
sprintf(benchmark_cmd[1], "%zu", param.span);
remove(RESULT_FILE_NAME);
......
......@@ -22,8 +22,6 @@
#define PAGE_SIZE (4 * 1024)
#define MB (1024 * 1024)
static unsigned char *startptr;
static void sb(void)
{
#if defined(__i386) || defined(__x86_64)
......@@ -40,32 +38,32 @@ static void cl_flush(void *p)
#endif
}
static void mem_flush(void *p, size_t s)
static void mem_flush(unsigned char *buf, size_t buf_size)
{
char *cp = (char *)p;
unsigned char *cp = buf;
size_t i = 0;
s = s / CL_SIZE; /* mem size in cache llines */
buf_size = buf_size / CL_SIZE; /* mem size in cache lines */
for (i = 0; i < s; i++)
for (i = 0; i < buf_size; i++)
cl_flush(&cp[i * CL_SIZE]);
sb();
}
static void *malloc_and_init_memory(size_t s)
static void *malloc_and_init_memory(size_t buf_size)
{
void *p = NULL;
uint64_t *p64;
size_t s64;
int ret;
ret = posix_memalign(&p, PAGE_SIZE, s);
ret = posix_memalign(&p, PAGE_SIZE, buf_size);
if (ret < 0)
return NULL;
p64 = (uint64_t *)p;
s64 = s / sizeof(uint64_t);
s64 = buf_size / sizeof(uint64_t);
while (s64 > 0) {
*p64 = (uint64_t)rand();
......@@ -76,12 +74,13 @@ static void *malloc_and_init_memory(size_t s)
return p;
}
static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
static int fill_one_span_read(unsigned char *buf, size_t buf_size)
{
unsigned char *end_ptr = buf + buf_size;
unsigned char sum, *p;
sum = 0;
p = start_ptr;
p = buf;
while (p < end_ptr) {
sum += *p;
p += (CL_SIZE / 2);
......@@ -90,27 +89,26 @@ static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
return sum;
}
static
void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr)
static void fill_one_span_write(unsigned char *buf, size_t buf_size)
{
unsigned char *end_ptr = buf + buf_size;
unsigned char *p;
p = start_ptr;
p = buf;
while (p < end_ptr) {
*p = '1';
p += (CL_SIZE / 2);
}
}
static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
char *resctrl_val)
static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
{
int ret = 0;
FILE *fp;
while (1) {
ret = fill_one_span_read(start_ptr, end_ptr);
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
ret = fill_one_span_read(buf, buf_size);
if (once)
break;
}
......@@ -126,75 +124,52 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
return 0;
}
static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
char *resctrl_val)
static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
{
while (1) {
fill_one_span_write(start_ptr, end_ptr);
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
fill_one_span_write(buf, buf_size);
if (once)
break;
}
return 0;
}
static int
fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
int op, char *resctrl_val)
static int fill_cache(size_t buf_size, int memflush, int op, bool once)
{
unsigned char *start_ptr, *end_ptr;
unsigned long long i;
unsigned char *buf;
int ret;
if (malloc_and_init)
start_ptr = malloc_and_init_memory(buf_size);
else
start_ptr = malloc(buf_size);
if (!start_ptr)
buf = malloc_and_init_memory(buf_size);
if (!buf)
return -1;
startptr = start_ptr;
end_ptr = start_ptr + buf_size;
/*
* It's better to touch the memory once to avoid any compiler
* optimizations
*/
if (!malloc_and_init) {
for (i = 0; i < buf_size; i++)
*start_ptr++ = (unsigned char)rand();
}
start_ptr = startptr;
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
mem_flush(start_ptr, buf_size);
mem_flush(buf, buf_size);
if (op == 0)
ret = fill_cache_read(start_ptr, end_ptr, resctrl_val);
ret = fill_cache_read(buf, buf_size, once);
else
ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
ret = fill_cache_write(buf, buf_size, once);
free(buf);
if (ret) {
printf("\n Error in fill cache read/write...\n");
return -1;
}
free(startptr);
return 0;
}
int run_fill_buf(unsigned long span, int malloc_and_init_memory,
int memflush, int op, char *resctrl_val)
int run_fill_buf(size_t span, int memflush, int op, bool once)
{
unsigned long long cache_size = span;
size_t cache_size = span;
int ret;
ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op,
resctrl_val);
ret = fill_cache(cache_size, memflush, op, once);
if (ret) {
printf("\n Error in fill cache\n");
return -1;
......
......@@ -22,18 +22,12 @@
* con_mon grp, mon_grp in resctrl FS.
* For each allocation, run 5 times in order to get average values.
*/
static int mba_setup(int num, ...)
static int mba_setup(struct resctrl_val_param *p)
{
static int runs_per_allocation, allocation = 100;
struct resctrl_val_param *p;
char allocation_str[64];
va_list param;
int ret;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
if (runs_per_allocation >= NUM_OF_RUNS)
runs_per_allocation = 0;
......@@ -154,7 +148,6 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
.mum_resctrlfs = true,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mba_setup
......
......@@ -15,7 +15,7 @@
#define NUM_OF_RUNS 5
static int
show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, size_t span)
{
unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
......@@ -40,14 +40,14 @@ show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
ksft_print_msg("%s Check MBM diff within %d%%\n",
ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
ksft_print_msg("Span (MB): %d\n", span);
ksft_print_msg("Span (MB): %zu\n", span / MB);
ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
return ret;
}
static int check_results(int span)
static int check_results(size_t span)
{
unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
char temp[1024], *token_array[8];
......@@ -86,16 +86,10 @@ static int check_results(int span)
return ret;
}
static int mbm_setup(int num, ...)
static int mbm_setup(struct resctrl_val_param *p)
{
struct resctrl_val_param *p;
va_list param;
int ret = 0;
va_start(param, num);
p = va_arg(param, struct resctrl_val_param *);
va_end(param);
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
......@@ -115,7 +109,7 @@ void mbm_test_cleanup(void)
remove(RESULT_FILE_NAME);
}
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = MBM_STR,
......@@ -123,7 +117,6 @@ int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
.mongrp = "m1",
.span = span,
.cpu_no = cpu_no,
.mum_resctrlfs = true,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mbm_setup
......
......@@ -3,7 +3,6 @@
#ifndef RESCTRL_H
#define RESCTRL_H
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <errno.h>
#include <sched.h>
......@@ -43,6 +42,7 @@
do { \
perror(err_msg); \
kill(ppid, SIGKILL); \
umount_resctrlfs(); \
exit(EXIT_FAILURE); \
} while (0)
......@@ -53,7 +53,6 @@
* @mongrp: Name of the monitor group (mon grp)
* @cpu_no: CPU number to which the benchmark would be binded
* @span: Memory bytes accessed in each benchmark iteration
* @mum_resctrlfs: Should the resctrl FS be remounted?
* @filename: Name of file to which the o/p should be written
* @bw_report: Bandwidth report type (reads vs writes)
* @setup: Call back function to setup test environment
......@@ -63,13 +62,12 @@ struct resctrl_val_param {
char ctrlgrp[64];
char mongrp[64];
int cpu_no;
unsigned long span;
bool mum_resctrlfs;
size_t span;
char filename[64];
char *bw_report;
unsigned long mask;
int num_of_runs;
int (*setup)(int num, ...);
int (*setup)(struct resctrl_val_param *param);
};
#define MBM_STR "mbm"
......@@ -84,8 +82,8 @@ extern char llc_occup_path[1024];
int get_vendor(void);
bool check_resctrlfs_support(void);
int filter_dmesg(void);
int remount_resctrlfs(bool mum_resctrlfs);
int get_resource_id(int cpu_no, int *resource_id);
int mount_resctrlfs(void);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
bool validate_resctrl_feature_request(const char *resctrl_val);
......@@ -98,10 +96,9 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush,
int op, char *resctrl_va);
int run_fill_buf(size_t span, int memflush, int op, bool once);
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd);
int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
......@@ -120,7 +117,7 @@ void cmt_test_cleanup(void);
int get_core_sibling(int cpu_no);
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
unsigned long cache_span, unsigned long max_diff,
size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt);
......
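Since ->setup() no longer takes a variable argument list, a test now wires its per-run callback directly against struct resctrl_val_param. A reduced sketch of the new shape, with illustrative names and assuming the selftest's resctrl.h definitions (END_OF_TESTS, CAT_STR):

static int example_setup(struct resctrl_val_param *p)
{
	/* stop the main loop once enough runs have been measured */
	if (p->num_of_runs >= 5)
		return END_OF_TESTS;

	/* ... write the schemata for this run ... */
	p->num_of_runs++;
	return 0;
}

static struct resctrl_val_param example_param = {
	.resctrl_val	= CAT_STR,
	.cpu_no		= 1,
	.setup		= example_setup,	/* direct pointer, no va_list */
};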
......@@ -70,60 +70,81 @@ void tests_cleanup(void)
cat_test_cleanup();
}
static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span,
static void run_mbm_test(char **benchmark_cmd, size_t span,
int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBM BW change ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
return;
goto umount;
}
if (!has_ben)
sprintf(benchmark_cmd[5], "%s", MBA_STR);
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBM: bw change\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
umount:
umount_resctrlfs();
}
static void run_mba_test(bool has_ben, char **benchmark_cmd, int span,
int cpu_no, char *bw_report)
static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBA Schemata change ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
return;
goto umount;
}
if (!has_ben)
sprintf(benchmark_cmd[1], "%d", span);
res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBA: schemata change\n");
umount:
umount_resctrlfs();
}
static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no)
static void run_cmt_test(char **benchmark_cmd, int cpu_no)
{
int res;
ksft_print_msg("Starting CMT test ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(CMT_STR)) {
ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
return;
goto umount;
}
if (!has_ben)
sprintf(benchmark_cmd[5], "%s", CMT_STR);
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
ksft_test_result(!res, "CMT: test\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
umount:
umount_resctrlfs();
}
static void run_cat_test(int cpu_no, int no_of_bits)
......@@ -132,22 +153,32 @@ static void run_cat_test(int cpu_no, int no_of_bits)
ksft_print_msg("Starting CAT test ...\n");
res = mount_resctrlfs();
if (res) {
ksft_exit_fail_msg("Failed to mount resctrl FS\n");
return;
}
if (!validate_resctrl_feature_request(CAT_STR)) {
ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
return;
goto umount;
}
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
ksft_test_result(!res, "CAT: test\n");
umount:
umount_resctrlfs();
}
int main(int argc, char **argv)
{
bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
int c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 0;
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
int ben_ind, ben_count, tests = 0;
size_t span = 250 * MB;
bool cat_test = true;
for (i = 0; i < argc; i++) {
......@@ -232,16 +263,15 @@ int main(int argc, char **argv)
benchmark_cmd[ben_count] = NULL;
} else {
/* If no benchmark is given by "-b" argument, use fill_buf. */
for (i = 0; i < 6; i++)
for (i = 0; i < 5; i++)
benchmark_cmd[i] = benchmark_cmd_area[i];
strcpy(benchmark_cmd[0], "fill_buf");
sprintf(benchmark_cmd[1], "%d", span);
sprintf(benchmark_cmd[1], "%zu", span);
strcpy(benchmark_cmd[2], "1");
strcpy(benchmark_cmd[3], "1");
strcpy(benchmark_cmd[4], "0");
strcpy(benchmark_cmd[5], "");
benchmark_cmd[6] = NULL;
strcpy(benchmark_cmd[3], "0");
strcpy(benchmark_cmd[4], "false");
benchmark_cmd[5] = NULL;
}
sprintf(bw_report, "reads");
......@@ -250,23 +280,24 @@ int main(int argc, char **argv)
if (!check_resctrlfs_support())
return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
if (umount_resctrlfs())
return ksft_exit_skip("resctrl FS unmount failed.\n");
filter_dmesg();
ksft_set_plan(tests ? : 4);
if (mbm_test)
run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
if (mba_test)
run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
run_mba_test(benchmark_cmd, cpu_no, bw_report);
if (cmt_test)
run_cmt_test(has_ben, benchmark_cmd, cpu_no);
run_cmt_test(benchmark_cmd, cpu_no);
if (cat_test)
run_cat_test(cpu_no, no_of_bits);
umount_resctrlfs();
ksft_finished();
}
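After the change, the default fill_buf argument vector built in main() lines up with the parsing in run_benchmark() below as follows (a reading of the code, not wording from the commit):

	/*
	 * benchmark_cmd[0] = "fill_buf"
	 * benchmark_cmd[1] = span in bytes (250 * MB by default, printed with %zu)
	 * benchmark_cmd[2] = memflush      ("1")
	 * benchmark_cmd[3] = operation     ("0" = read loop, non-zero = write loop)
	 * benchmark_cmd[4] = once          ("false" = loop until setup() stops the test)
	 * benchmark_cmd[5] = NULL terminator
	 */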
......@@ -648,10 +648,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
return ret;
}
ret = remount_resctrlfs(param->mum_resctrlfs);
if (ret)
return ret;
/*
* If benchmark wasn't successfully started by child, then child should
* kill parent, so save parent's pid
......@@ -763,7 +759,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
ret = param->setup(1, param);
ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
......@@ -788,7 +784,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
signal_handler_unregister();
out:
kill(bm_pid, SIGKILL);
umount_resctrlfs();
return ret;
}
......@@ -48,29 +48,20 @@ static int find_resctrl_mount(char *buffer)
}
/*
* remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
* @mum_resctrlfs: Should the resctrl FS be remounted?
* mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl
*
* If not mounted, mount it.
* If mounted and mum_resctrlfs then remount resctrl FS.
* If mounted and !mum_resctrlfs then noop
* Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
* pre-existing settings interfering with the test results.
*
* Return: 0 on success, non-zero on failure
*/
int remount_resctrlfs(bool mum_resctrlfs)
int mount_resctrlfs(void)
{
char mountpoint[256];
int ret;
ret = find_resctrl_mount(mountpoint);
if (ret)
strcpy(mountpoint, RESCTRL_PATH);
if (!ret && mum_resctrlfs && umount(mountpoint))
ksft_print_msg("Fail: unmounting \"%s\"\n", mountpoint);
if (!ret && !mum_resctrlfs)
return 0;
ret = find_resctrl_mount(NULL);
if (ret != -ENOENT)
return -1;
ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
......@@ -82,10 +73,16 @@ int remount_resctrlfs(bool mum_resctrlfs)
int umount_resctrlfs(void)
{
if (find_resctrl_mount(NULL))
char mountpoint[256];
int ret;
ret = find_resctrl_mount(mountpoint);
if (ret == -ENOENT)
return 0;
if (ret)
return ret;
if (umount(RESCTRL_PATH)) {
if (umount(mountpoint)) {
perror("# Unable to umount resctrl");
return errno;
......@@ -305,10 +302,10 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
*/
void run_benchmark(int signum, siginfo_t *info, void *ucontext)
{
int operation, ret, malloc_and_init_memory, memflush;
unsigned long span, buffer_span;
int operation, ret, memflush;
char **benchmark_cmd;
char resctrl_val[64];
size_t span;
bool once;
FILE *fp;
benchmark_cmd = info->si_ptr;
......@@ -324,18 +321,16 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
/* Execute default fill_buf benchmark */
span = strtoul(benchmark_cmd[1], NULL, 10);
malloc_and_init_memory = atoi(benchmark_cmd[2]);
memflush = atoi(benchmark_cmd[3]);
operation = atoi(benchmark_cmd[4]);
sprintf(resctrl_val, "%s", benchmark_cmd[5]);
if (strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
buffer_span = span * MB;
memflush = atoi(benchmark_cmd[2]);
operation = atoi(benchmark_cmd[3]);
if (!strcmp(benchmark_cmd[4], "true"))
once = true;
else if (!strcmp(benchmark_cmd[4], "false"))
once = false;
else
buffer_span = span;
PARENT_EXIT("Invalid once parameter");
if (run_fill_buf(buffer_span, malloc_and_init_memory, memflush,
operation, resctrl_val))
if (run_fill_buf(span, memflush, operation, once))
fprintf(stderr, "Error in running fill buffer\n");
} else {
/* Execute specified benchmark */
......@@ -611,7 +606,8 @@ char *fgrep(FILE *inf, const char *str)
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resctrl_val: Requested feature
*
* Return: True if the feature is supported, else false
* Return: True if the feature is supported, else false. False is also
* returned if resctrl FS is not mounted.
*/
bool validate_resctrl_feature_request(const char *resctrl_val)
{
......@@ -619,11 +615,13 @@ bool validate_resctrl_feature_request(const char *resctrl_val)
bool found = false;
char *res;
FILE *inf;
int ret;
if (!resctrl_val)
return false;
if (remount_resctrlfs(false))
ret = find_resctrl_mount(NULL);
if (ret)
return false;
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
......
......@@ -33,7 +33,7 @@ $(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
$(OUTPUT)/basic_percpu_ops_mm_cid_test: basic_percpu_ops_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
$(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID_ID $< $(LDLIBS) -lrseq -o $@
$(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@
$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
rseq.h rseq-*.h
......
......@@ -33,4 +33,30 @@
#define RSEQ_COMBINE_TOKENS(_tokena, _tokenb) \
RSEQ__COMBINE_TOKENS(_tokena, _tokenb)
#ifdef __cplusplus
#define rseq_unqual_scalar_typeof(x) \
std::remove_cv<std::remove_reference<decltype(x)>::type>::type
#else
#define rseq_scalar_type_to_expr(type) \
unsigned type: (unsigned type)0, \
signed type: (signed type)0
/*
* Use C11 _Generic to express unqualified type from expression. This removes
* volatile qualifier from expression type.
*/
#define rseq_unqual_scalar_typeof(x) \
__typeof__( \
_Generic((x), \
char: (char)0, \
rseq_scalar_type_to_expr(char), \
rseq_scalar_type_to_expr(short), \
rseq_scalar_type_to_expr(int), \
rseq_scalar_type_to_expr(long), \
rseq_scalar_type_to_expr(long long), \
default: (x) \
) \
)
#endif
#endif /* RSEQ_COMPILER_H_ */
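rseq_unqual_scalar_typeof() relies on the fact that the controlling expression of C11 _Generic undergoes lvalue conversion, which drops qualifiers, so the selected branch yields an unqualified scalar type. A reduced, standalone illustration (only a couple of types, not the full selftest macro):

#include <stdio.h>

/* same idea as the selftest macro, trimmed to a few types for brevity */
#define unqual_typeof(x)				\
	__typeof__(_Generic((x),			\
		int: (int)0,				\
		unsigned int: (unsigned int)0,		\
		long: (long)0,				\
		unsigned long: (unsigned long)0,	\
		default: (x)))

int main(void)
{
	volatile int v = 42;

	__typeof__(v)    a = v;	/* type: volatile int */
	unqual_typeof(v) b = v;	/* type: int (qualifier stripped) */

	printf("%d %d\n", a, b);
	return 0;
}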
......@@ -66,7 +66,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_mb(); \
____p1; \
})
......@@ -76,7 +76,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_mb(); \
RSEQ_WRITE_ONCE(*p, v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
......
......@@ -27,59 +27,61 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1; \
switch (sizeof(*p)) { \
union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u; \
switch (sizeof(*(p))) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
: "=r" (*(__u8 *)p) \
: "Q" (*p) : "memory"); \
__asm__ __volatile__ ("ldarb %w0, %1" \
: "=r" (*(__u8 *)__u.__c) \
: "Q" (*(p)) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
: "=r" (*(__u16 *)p) \
: "Q" (*p) : "memory"); \
__asm__ __volatile__ ("ldarh %w0, %1" \
: "=r" (*(__u16 *)__u.__c) \
: "Q" (*(p)) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (*(__u32 *)p) \
: "Q" (*p) : "memory"); \
__asm__ __volatile__ ("ldar %w0, %1" \
: "=r" (*(__u32 *)__u.__c) \
: "Q" (*(p)) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
: "=r" (*(__u64 *)p) \
: "Q" (*p) : "memory"); \
__asm__ __volatile__ ("ldar %0, %1" \
: "=r" (*(__u64 *)__u.__c) \
: "Q" (*(p)) : "memory"); \
break; \
} \
____p1; \
(rseq_unqual_scalar_typeof(*(p)))__u.__val; \
})
#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
#define rseq_smp_store_release(p, v) \
do { \
switch (sizeof(*p)) { \
union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u = \
{ .__val = (rseq_unqual_scalar_typeof(*(p))) (v) }; \
switch (sizeof(*(p))) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
: "=Q" (*p) \
: "r" ((__u8)v) \
__asm__ __volatile__ ("stlrb %w1, %0" \
: "=Q" (*(p)) \
: "r" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
: "=Q" (*p) \
: "r" ((__u16)v) \
__asm__ __volatile__ ("stlrh %w1, %0" \
: "=Q" (*(p)) \
: "r" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
: "=Q" (*p) \
: "r" ((__u32)v) \
__asm__ __volatile__ ("stlr %w1, %0" \
: "=Q" (*(p)) \
: "r" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
: "=Q" (*p) \
: "r" ((__u64)v) \
__asm__ __volatile__ ("stlr %1, %0" \
: "=Q" (*(p)) \
: "r" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
} \
......
......@@ -45,7 +45,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_mb(); \
____p1; \
})
......@@ -55,7 +55,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_mb(); \
RSEQ_WRITE_ONCE(*p, v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#if _MIPS_SZLONG == 64
......
......@@ -23,7 +23,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_lwsync(); \
____p1; \
})
......@@ -33,7 +33,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_lwsync(); \
RSEQ_WRITE_ONCE(*p, v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
/*
......
......@@ -36,8 +36,8 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
RISCV_FENCE(r, rw) \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
RISCV_FENCE(r, rw); \
____p1; \
})
......@@ -46,7 +46,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
RISCV_FENCE(rw, w); \
RSEQ_WRITE_ONCE(*(p), v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
......
......@@ -15,7 +15,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_barrier(); \
____p1; \
})
......@@ -25,7 +25,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_barrier(); \
RSEQ_WRITE_ONCE(*p, v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#ifdef __s390x__
......
......@@ -42,7 +42,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
__typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_barrier(); \
____p1; \
})
......@@ -52,7 +52,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_barrier(); \
RSEQ_WRITE_ONCE(*p, v); \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
......
......@@ -2,14 +2,6 @@
CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
LDLIBS += -lrt -lpthread -lm
# Note:
# This test depends on <linux/user_events.h> exported in uapi
# The following commit removed user_events.h out of uapi:
# commit 5cfff569cab8bf544bab62c911c5d6efd5af5e05
# tracing: Move user_events.h temporarily out of include/uapi
# This test will not compile until user_events.h is added
# back to uapi.
TEST_GEN_PROGS = ftrace_test dyn_test perf_test abi_test
TEST_FILES := settings
......