Commit 9d9a9bf0 authored by Linus Torvalds

Merge tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

 - Use correct type for size of memory allocated for ELF core header on
   kernel crash.

 - Fix insecure W+X mapping warning when the KASAN shadow memory range is
   not aligned on a page boundary.

 - Avoid allocating KASAN shadow memory that comes up short by one page
   when the original memory range is smaller than (PAGE_SIZE << 3); see
   the shadow-arithmetic sketch after this list.

 - Fix virtual vs physical address confusion in physical memory
   enumerator. It is not a real issue, since virtual and physical
   addresses are currently the same.

 - Set CONFIG_NET_TC_SKB_EXT=y in the s390 config files, as it is required
   for offloading TC as well as bridges on switchdev-capable ConnectX
   devices.
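
For context on the two KASAN items: generic KASAN maps every 8 bytes of memory
to 1 byte of shadow, so a range smaller than (PAGE_SIZE << 3) translates to
less than one shadow page, and shadow boundaries derived from an unaligned
range are themselves unaligned. The fixes round the shadow start down and the
shadow end up to page boundaries. The following is a minimal standalone sketch
of that arithmetic, not kernel code; it assumes 4 KiB pages and a simplified
mem_to_shadow() that drops the offset the real kasan_mem_to_shadow() adds.

/*
 * Standalone illustration of the KASAN shadow arithmetic behind the two
 * KASAN fixes above; not the kernel code. Assumes 4 KiB pages and the
 * generic KASAN scale of 1 shadow byte per 8 bytes of memory.
 */
#include <stdio.h>

#define PAGE_SIZE			4096UL
#define PAGE_MASK			(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)			(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_ALIGN_DOWN(x)		((x) & PAGE_MASK)
#define KASAN_SHADOW_SCALE_SHIFT	3	/* 1 shadow byte covers 8 bytes */

/* simplified stand-in for kasan_mem_to_shadow(); the real one adds an offset */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return addr >> KASAN_SHADOW_SCALE_SHIFT;
}

int main(void)
{
	/* a usable range of 20 KiB, i.e. smaller than PAGE_SIZE << 3 (32 KiB) */
	unsigned long start = 0x10000, end = start + 5 * PAGE_SIZE;
	unsigned long sha_start = mem_to_shadow(start);	/* 0x2000 */
	unsigned long sha_end = mem_to_shadow(end);	/* 0x2a00 */

	/* whole pages between the raw shadow bounds: (0x2a00 - 0x2000) / 4096 == 0 */
	printf("unaligned shadow pages: %lu\n", (sha_end - sha_start) / PAGE_SIZE);

	/* rounding start down and end up covers the full shadow range: 1 page */
	printf("aligned shadow pages:   %lu\n",
	       (PAGE_ALIGN(sha_end) - PAGE_ALIGN_DOWN(sha_start)) / PAGE_SIZE);
	return 0;
}

With the 20 KiB example range, the unaligned computation yields zero whole
shadow pages while the aligned one yields the single page actually needed,
which is the "short by one page" situation the fix avoids.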

* tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/defconfigs: set CONFIG_NET_TC_SKB_EXT=y
  s390/boot: fix physmem_info virtual vs physical address confusion
  s390/kasan: avoid short by one page shadow memory
  s390/kasan: fix insecure W+X mapping warning
  s390/crash: use the correct type for memory allocation
parents be5b52dc ad3d770b
@@ -45,6 +45,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
 
 static pte_t pte_z;
 
+static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	pgtable_populate(start, end, mode);
+}
+
 static void kasan_populate_shadow(void)
 {
 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -95,17 +102,17 @@ static void kasan_populate_shadow(void)
 	 */
 	for_each_physmem_usable_range(i, &start, &end)
-		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		untracked_end = VMALLOC_START;
 		/* shallowly populate kasan shadow for vmalloc and modules */
-		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
 	} else {
 		untracked_end = MODULES_VADDR;
 	}
 	/* populate kasan shadow for untracked memory */
-	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
-	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
 }
 
 static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
...
@@ -116,6 +116,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
...
@@ -107,6 +107,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
...
@@ -3,6 +3,7 @@
 #define _ASM_S390_MEM_DETECT_H
 
 #include <linux/types.h>
+#include <asm/page.h>
 
 enum physmem_info_source {
 	MEM_DETECT_NONE = 0,
@@ -133,7 +134,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
 
 #define for_each_physmem_reserved_type_range(t, range, p_start, p_end)			\
 	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
-	     range && range->end; range = range->chain,					\
+	     range && range->end; range = range->chain ? __va(range->chain) : NULL,	\
 	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
 
 static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
@@ -145,7 +146,7 @@ static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
 		return range;
 	}
 	if (range->chain)
-		return range->chain;
+		return __va(range->chain);
 	while (++*t < RR_MAX) {
 		range = &physmem_info.reserved[*t];
 		if (range->end)
...
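
The reserved range descriptors in physmem_info are chained through physical
addresses, so the hunks above translate each link with __va() before following
it; as the pull message notes, this is currently harmless because virtual and
physical addresses coincide. Below is a toy userspace model of the same
pattern, walking a list whose next links are stored as physical addresses.
struct range, VIRT_OFFSET and the phys_to_virt()/virt_to_phys() helpers are
illustrative stand-ins, not the kernel's physmem_info types.

/*
 * Toy model of the physmem_info change above: chain links hold physical
 * addresses and must be translated before being dereferenced. The fixed
 * virtual/physical offset and all names here are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define VIRT_OFFSET 0x1000UL	/* pretend: virtual address = physical + 0x1000 */

struct range {
	unsigned long start;
	unsigned long end;
	uint64_t chain;		/* physical address of the next range, or 0 */
};

static uint64_t virt_to_phys(const struct range *r)
{
	return (uint64_t)(uintptr_t)r - VIRT_OFFSET;
}

static struct range *phys_to_virt(uint64_t pa)
{
	return (struct range *)(uintptr_t)(pa + VIRT_OFFSET);
}

int main(void)
{
	struct range second = { 0x200000, 0x300000, 0 };
	struct range first  = { 0x000000, 0x100000, virt_to_phys(&second) };
	struct range *r;

	/* translate each physical link before dereferencing it, as __va() does */
	for (r = &first; r; r = r->chain ? phys_to_virt(r->chain) : NULL)
		printf("range %#lx-%#lx\n", r->start, r->end);
	return 0;
}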
@@ -568,9 +568,9 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt)
 int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 {
 	Elf64_Phdr *phdr_notes, *phdr_loads;
+	size_t alloc_size;
 	int mem_chunk_cnt;
 	void *ptr, *hdr;
-	u32 alloc_size;
 	u64 hdr_off;
 
 	/* If we are not in kdump or zfcp/nvme dump mode return */
...
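
The hunk above makes alloc_size a size_t so that it matches the return type of
get_elfcorehdr_size(); a u32 would silently truncate any size above 4 GiB. A
minimal standalone illustration of that truncation follows; the 5 GiB figure
is hypothetical and the example assumes a 64-bit size_t.

/*
 * Why alloc_size should be size_t rather than u32: assigning a size_t value
 * larger than UINT32_MAX to a 32-bit variable silently truncates it. The
 * 5 GiB figure is purely illustrative; it assumes a 64-bit size_t.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t needed = 5UL * 1024 * 1024 * 1024;	/* 5 GiB, hypothetical */
	uint32_t truncated = (uint32_t)needed;		/* wraps to 1 GiB */
	size_t preserved = needed;			/* keeps the full value */

	printf("needed=%zu truncated=%u preserved=%zu\n",
	       needed, truncated, preserved);
	return 0;
}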
@@ -667,7 +667,15 @@ static void __init memblock_region_swap(void *a, void *b, int size)
 #ifdef CONFIG_KASAN
 #define __sha(x)	((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static inline int set_memory_kasan(unsigned long start, unsigned long end)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
+}
+
 #endif
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
  * we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -737,10 +745,8 @@ void __init vmem_map_init(void)
 	}
 
 #ifdef CONFIG_KASAN
-	for_each_mem_range(i, &base, &end) {
-		set_memory_rwnx(__sha(base),
-				(__sha(end) - __sha(base)) >> PAGE_SHIFT);
-	}
+	for_each_mem_range(i, &base, &end)
+		set_memory_kasan(base, end);
 #endif
 	set_memory_rox((unsigned long)_stext,
 		       (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
...