Commit 139711f0 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "16 patches

  Subsystems affected by this patch series: mm (selftests, hugetlb,
  pagecache, mremap, kasan, and slub), kbuild, checkpatch, misc, and
  lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: slub: call account_slab_page() after slab page initialization
  zlib: move EXPORT_SYMBOL() and MODULE_LICENSE() out of dfltcc_syms.c
  lib/zlib: fix inflating zlib streams on s390
  lib/genalloc: fix the overflow when size is too big
  kdev_t: always inline major/minor helper functions
  sizes.h: add SZ_8G/SZ_16G/SZ_32G macros
  local64.h: make <asm/local64.h> mandatory
  kasan: fix null pointer dereference in kasan_record_aux_stack
  mm: generalise COW SMC TLB flushing race comment
  mm/mremap.c: fix extent calculation
  mm: memmap defer init doesn't work as expected
  mm: add prototype for __add_to_page_cache_locked()
  checkpatch: prefer strscpy to strlcpy
  Revert "kbuild: avoid static_assert for genksyms"
  mm/hugetlb: fix deadlock in hugetlb_cow error path
  selftests/vm: fix building protection keys test
parents dea8dcf2 1f3147b4
-#include <asm-generic/local64.h>
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
@@ -2,7 +2,6 @@
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
-generic-y += local64.h
generic-y += parport.h
generated-y += mach-types.h
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h
@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h
@@ -2,5 +2,4 @@
generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
-#include <asm-generic/local64.h>
@@ -536,7 +536,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
-				 args->nid, args->zone, page_to_pfn(map_start),
+				 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	return 0;
}
@@ -546,7 +546,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn,
+		memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	} else {
		struct page *start;
@@ -2,6 +2,5 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h
@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += syscalls.h
@@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-y += export.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += qrwlock.h
@@ -4,6 +4,5 @@ generic-y += cmpxchg.h
generic-y += export.h
generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += parport.h
generic-y += user.h
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qspinlock_types.h
generic-y += qspinlock.h
@@ -3,6 +3,5 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += user.h
@@ -5,7 +5,6 @@ generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += vtime.h
@@ -3,6 +3,5 @@ generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += user.h
generic-y += vmlinux.lds.h
@@ -7,5 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += export.h
generic-y += kvm_types.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
@@ -6,5 +6,4 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += export.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
-#include <asm-generic/local64.h>
@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h
@@ -34,6 +34,7 @@ mandatory-y += kmap_size.h
mandatory-y += kprobes.h
mandatory-y += linkage.h
mandatory-y += local.h
+mandatory-y += local64.h
mandatory-y += mm-arch-hooks.h
mandatory-y += mmiowb.h
mandatory-y += mmu.h
@@ -77,9 +77,4 @@
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)

-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
#endif /* _LINUX_BUILD_BUG_H */
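Since the revert leaves only the plain wrapper in place, its behaviour is easy to check outside the kernel. A minimal userspace sketch (the two macro definitions are copied from the hunk above; the assertions themselves are invented examples):

#include <stdio.h>

#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)

/* Both checks happen at compile time; a false condition breaks the build. */
static_assert(sizeof(long long) >= 8);             /* message defaults to the stringified expression */
static_assert(1 + 1 == 2, "arithmetic is broken"); /* explicit message */

int main(void)
{
	puts("compile-time assertions passed");
	return 0;
}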
@@ -21,61 +21,61 @@
})

/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
	return MAJOR(dev) < 256 && MINOR(dev) < 256;
}

-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
	return (MAJOR(dev) << 8) | MINOR(dev);
}

-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
	return MKDEV((val >> 8) & 255, val & 255);
}

-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
	unsigned major = MAJOR(dev);
	unsigned minor = MINOR(dev);
	return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
	unsigned major = (dev & 0xfff00) >> 8;
	unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
	return MKDEV(major, minor);
}

-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
	return new_encode_dev(dev);
}

-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
	return new_decode_dev(dev);
}

-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
	return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}

-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
	return MINOR(dev) | (MAJOR(dev) << 18);
}

-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
	return (dev >> 18) & 0x3fff;
}

-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
	return dev & 0x3ffff;
}
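As a cross-check of the bit layout these helpers implement, the new_encode_dev()/new_decode_dev() pair round-trips any valid device number. A standalone userspace sketch (the MAJOR/MINOR/MKDEV stand-ins mirror the kernel's 12-bit major, 20-bit minor split; the sample values are arbitrary):

#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))

/* Same bit-shuffling as the kernel helpers above, with plain unsigned int. */
static unsigned int new_encode_dev(unsigned int dev)
{
	unsigned int major = MAJOR(dev), minor = MINOR(dev);

	/* low 8 minor bits stay next to the major; high minor bits move up */
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

static unsigned int new_decode_dev(unsigned int dev)
{
	unsigned int major = (dev & 0xfff00) >> 8;
	unsigned int minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);

	return MKDEV(major, minor);
}

int main(void)
{
	unsigned int dev = MKDEV(259, 70000);	/* arbitrary major/minor */

	printf("encoded: %#x\n", new_encode_dev(dev));
	printf("round trip ok: %d\n", new_decode_dev(new_encode_dev(dev)) == dev);
	return 0;
}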
@@ -216,6 +216,13 @@ int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
+/*
+ * Any attempt to mark this function as static leads to build failure
+ * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
+ * is referred to by BPF code. This must be visible for error injection.
+ */
+int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+		pgoff_t index, gfp_t gfp, void **shadowp);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -2432,8 +2439,9 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-		enum meminit_context, struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+		unsigned long, unsigned long, enum meminit_context,
+		struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
@@ -44,6 +44,9 @@
#define SZ_2G 0x80000000

#define SZ_4G _AC(0x100000000, ULL)
+#define SZ_8G _AC(0x200000000, ULL)
+#define SZ_16G _AC(0x400000000, ULL)
+#define SZ_32G _AC(0x800000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)

#endif /* __LINUX_SIZES_H__ */
@@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
 * users set the same bit, one user will return remain bits, otherwise
 * return 0.
 */
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
-	const int size = start + nr;
+	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

-	while (nr - bits_to_set >= 0) {
+	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
@@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
 * users clear the same bit, one user will return remain bits,
 * otherwise return 0.
 */
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
-	const int size = start + nr;
+	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

-	while (nr - bits_to_clear >= 0) {
+	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
@@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
-	int nbits = size >> pool->min_alloc_order;
-	int nbytes = sizeof(struct gen_pool_chunk) +
+	unsigned long nbits = size >> pool->min_alloc_order;
+	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
-	int bit, end_bit;
+	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
@@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit, remain;
+	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
@@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
-	int start_bit, nbits, remain;
+	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
@@ -755,7 +756,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
-		int next_bit = find_next_bit(map, size, index + nr);
+		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
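The loop-condition rewrite above follows directly from the type change: once nr is unsigned long, "nr - bits_to_set" can never go negative, it wraps to a huge value, so the old test would always hold and the comparison has to be written as "nr >= bits_to_set". A small userspace demonstration with invented values:

#include <stdio.h>

int main(void)
{
	unsigned long nr = 3;		/* bits still to process */
	unsigned long bits_to_set = 64;	/* bits available in this word */

	/* unsigned subtraction wraps instead of going negative */
	printf("nr - bits_to_set = %lu\n", nr - bits_to_set);
	printf("old test (nr - bits_to_set >= 0): %d\n", nr - bits_to_set >= 0);
	printf("new test (nr >= bits_to_set):     %d\n", nr >= bits_to_set);
	return 0;
}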
@@ -8,4 +8,4 @@
obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o

-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
// SPDX-License-Identifier: Zlib
/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */

#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include "dfltcc_util.h"
#include "dfltcc.h"
@@ -53,3 +54,6 @@ void dfltcc_reset(
	dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
	dfltcc_state->param.ribm = DFLTCC_RIBM;
}
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");
@@ -4,6 +4,7 @@
#include "dfltcc_util.h"
#include "dfltcc.h"
#include <asm/setup.h>
+#include <linux/export.h>
#include <linux/zutil.h>
/*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
	return 1;
}
+EXPORT_SYMBOL(dfltcc_can_deflate);

static void dfltcc_gdht(
	z_streamp strm
@@ -277,3 +279,4 @@ int dfltcc_deflate(
		goto again; /* deflate() must use all input or all output */
	return 1;
}
+EXPORT_SYMBOL(dfltcc_deflate);
@@ -125,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
	param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
	if (param->hl)
		param->nt = 0; /* Honor history for the first block */
-	param->cv = state->flags ? REVERSE(state->check) : state->check;
+	param->cv = state->check;

	/* Inflate */
	do {
@@ -138,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
	state->bits = param->sbb;
	state->whave = param->hl;
	state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
-	state->check = state->flags ? REVERSE(param->cv) : param->cv;
+	state->check = param->cv;
	if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
		/* Report an error if stream is corrupted */
		state->mode = BAD;
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
@@ -4105,10 +4105,30 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
	 * may get SIGKILLed if it later faults.
	 */
	if (outside_reserve) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		pgoff_t idx;
+		u32 hash;
+
		put_page(old_page);
		BUG_ON(huge_pte_none(pte));
+		/*
+		 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
+		 * unmapping.  unmapping needs to hold i_mmap_rwsem
+		 * in write mode.  Dropping i_mmap_rwsem in read mode
+		 * here is OK as COW mappings do not interact with
+		 * PMD sharing.
+		 *
+		 * Reacquire both after unmap operation.
+		 */
+		idx = vma_hugecache_offset(h, vma, haddr);
+		hash = hugetlb_fault_mutex_hash(mapping, idx);
+		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
+
		unmap_ref_private(mm, vma, old_page, haddr);
-		BUG_ON(huge_pte_none(pte));
+
+		i_mmap_lock_read(mapping);
+		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		spin_lock(ptl);
		ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
		if (likely(ptep &&
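The deadlock fixed above is a lock-ordering problem: the COW error path held the fault mutex and i_mmap_rwsem in read mode while calling an unmap routine that needs i_mmap_rwsem in write mode. A generic pthreads sketch of the drop-and-reacquire pattern the patch applies (userspace stand-ins, not the kernel primitives; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t fault_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the unmap path: it needs the rwlock in write mode. */
static void unmap_needs_write(void)
{
	pthread_rwlock_wrlock(&i_mmap_rwsem);
	puts("unmapping under the write lock");
	pthread_rwlock_unlock(&i_mmap_rwsem);
}

static void cow_error_path(void)
{
	/* fault path: the rwlock (read) is taken before the mutex */
	pthread_rwlock_rdlock(&i_mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);

	/*
	 * Calling unmap_needs_write() right here would block forever on
	 * a lock this thread already holds, so release both first...
	 */
	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&i_mmap_rwsem);

	unmap_needs_write();

	/* ...then reacquire in the original order and carry on. */
	pthread_rwlock_rdlock(&i_mmap_rwsem);
	pthread_mutex_lock(&fault_mutex);
	puts("locks reacquired, fault handling continues");
	pthread_mutex_unlock(&fault_mutex);
	pthread_rwlock_unlock(&i_mmap_rwsem);
}

int main(void)
{
	cow_error_path();
	return 0;
}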
@@ -337,6 +337,8 @@ void kasan_record_aux_stack(void *addr)
	cache = page->slab_cache;
	object = nearest_obj(cache, page, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
+	if (!alloc_meta)
+		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
@@ -2892,11 +2892,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = pte_sw_mkyoung(entry);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+
		/*
		 * Clear the pte entry and flush it first, before updating the
-		 * pte with the new entry. This will avoid a race condition
-		 * seen in the presence of one thread doing SMC and another
-		 * thread doing COW.
+		 * pte with the new entry, to keep TLBs on different CPUs in
+		 * sync. This code used to set the new PTE then flush TLBs, but
+		 * that left a window where the new PTE could be loaded into
+		 * some TLBs while the old PTE remains in others.
		 */
		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
@@ -713,7 +713,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			 MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
@@ -358,7 +358,9 @@ static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
-	extent = (next > old_end) ? old_end - old_addr : next - old_addr;
+	extent = next - old_addr;
+	if (extent > old_end - old_addr)
+		extent = old_end - old_addr;

	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
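Why the ternary had to go: when old_addr + size wraps past the top of the address space, next lands below old_addr, the old "next > old_end" guard never fires, and "next - old_addr" overstates how much can be moved. A standalone sketch with invented addresses near the top of a 64-bit address space:

#include <stdio.h>

int main(void)
{
	unsigned long mask = ~0xfffffUL;		/* align to a 1 MiB "page table" span */
	unsigned long old_addr = ~0UL - 0x8000 + 1;	/* range starts near the top */
	unsigned long old_end  = old_addr + 0x6000;	/* only 0x6000 bytes remain */
	unsigned long size = 0x100000;
	unsigned long next, old_extent, extent;

	next = (old_addr + size) & mask;		/* wraps to a tiny value */

	/* old expression: the "next > old_end" guard never fires on wrap */
	old_extent = (next > old_end) ? old_end - old_addr : next - old_addr;

	/* new expression: compute, then clamp to what is really left */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	printf("old extent = %#lx (overruns the range)\n", old_extent);
	printf("new extent = %#lx\n", extent);
	return 0;
}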
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

+	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
@@ -6116,7 +6118,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn,
+		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
@@ -6152,7 +6154,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
-			if (defer_init(nid, pfn, end_pfn))
+			if (defer_init(nid, pfn, zone_end_pfn))
				break;
		}
@@ -6266,7 +6268,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
		if (end_pfn > start_pfn) {
			size = end_pfn - start_pfn;
-			memmap_init_zone(size, nid, zone, start_pfn,
+			memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
		}
	}
@@ -1619,9 +1619,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
	else
		page = __alloc_pages_node(node, flags, order);

-	if (page)
-		account_slab_page(page, order, s);
-
	return page;
}
@@ -1774,6 +1771,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
	page->objects = oo_objects(oo);

+	account_slab_page(page, oo_order(oo), s);
+
	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
@@ -6646,6 +6646,12 @@ sub process {
#		}
#	}

+# strlcpy uses that should likely be strscpy
+		if ($line =~ /\bstrlcpy\s*\(/) {
+			WARN("STRLCPY",
+			     "Prefer strscpy over strlcpy - see: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw\@mail.gmail.com/\n" . $herecurr);
+		}
+
# typecasts on min/max could be min_t/max_t
		if ($perl_version_ok &&
		    defined $stat &&
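The semantic difference behind the new warning: strlcpy() returns the total length of the source and so must read all of it even when truncating, while strscpy() returns the number of characters copied, or -E2BIG on truncation, and never reads past what it needs. A userspace sketch with a simplified stand-in for the kernel's strscpy():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in with the kernel strscpy() return convention. */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -E2BIG;
	len = strnlen(src, size);	/* never reads more than size bytes */
	if (len == size) {		/* src does not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy_like(buf, "short", sizeof(buf)));         /* 5 */
	printf("%ld\n", strscpy_like(buf, "much too long", sizeof(buf))); /* -E2BIG */
	printf("%s\n", buf);	/* "much to", always NUL-terminated */
	return 0;
}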
@@ -4,7 +4,7 @@
include local_config.mk

uname_M := $(shell uname -m 2>/dev/null || echo not)
-MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/')
+MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')

# Without this, failed build products remain, with up-to-date timestamps,
# thus tricking Make (and you!) into believing that All Is Well, in subsequent
@@ -43,7 +43,7 @@ TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd

-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c)
CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie)
@@ -65,13 +65,13 @@ TEST_GEN_FILES += $(BINARIES_64)
endif
else

-ifneq (,$(findstring $(ARCH),powerpc))
+ifneq (,$(findstring $(MACHINE),ppc64))
TEST_GEN_FILES += protection_keys
endif

endif

-ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
+ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
TEST_GEN_FILES += write_to_hugetlbfs
@@ -84,7 +84,7 @@ TEST_FILES := test_vmalloc.sh
KSFT_KHDR_INSTALL := 1
include ../lib.mk

-ifeq ($(ARCH),x86_64)
+ifeq ($(MACHINE),x86_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))