Commit 18b7fd1c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - various hotfixes

 - kexec_file updates and feature work

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (27 commits)
  kernel/kexec_file.c: move purgatories sha256 to common code
  kernel/kexec_file.c: allow archs to set purgatory load address
  kernel/kexec_file.c: remove mis-use of sh_offset field during purgatory load
  kernel/kexec_file.c: remove unneeded variables in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: remove unneeded for-loop in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: split up __kexec_load_puragory
  kernel/kexec_file.c: use read-only sections in arch_kexec_apply_relocations*
  kernel/kexec_file.c: search symbols in read-only kexec_purgatory
  kernel/kexec_file.c: make purgatory_info->ehdr const
  kernel/kexec_file.c: remove checks in kexec_purgatory_load
  include/linux/kexec.h: silence compile warnings
  kexec_file, x86: move re-factored code to generic side
  x86: kexec_file: clean up prepare_elf64_headers()
  x86: kexec_file: lift CRASH_MAX_RANGES limit on crash_mem buffer
  x86: kexec_file: remove X86_64 dependency from prepare_elf64_headers()
  x86: kexec_file: purge system-ram walking from prepare_elf64_headers()
  kexec_file,x86,powerpc: factor out kexec_file_ops functions
  kexec_file: make use of purgatory optional
  proc: revalidate misc dentries
  mm, slab: reschedule cache_reap() on the same CPU
  ...
parents 48023102 df6f2801
@@ -178,6 +178,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
...
@@ -552,6 +552,8 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to a list of segments as is the
	  case for the older kexec call.

+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config RELOCATABLE
	bool "Build a relocatable kernel"
	depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
...
@@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void)
 }

 #ifdef CONFIG_KEXEC_FILE
-extern struct kexec_file_ops kexec_elf64_ops;
+extern const struct kexec_file_ops kexec_elf64_ops;

 #ifdef CONFIG_IMA_KEXEC
 #define ARCH_HAS_KIMAGE_ARCH
...
@@ -572,7 +572,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 {
	int ret;
	unsigned int fdt_size;
-	unsigned long kernel_load_addr, purgatory_load_addr;
+	unsigned long kernel_load_addr;
	unsigned long initrd_load_addr = 0, fdt_load_addr;
	void *fdt;
	const void *slave_code;
@@ -580,6 +580,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	struct elf_info elf_info;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ppc64_rma_size };
+	struct kexec_buf pbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ppc64_rma_size, .top_down = true };

	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
	if (ret)
@@ -591,14 +593,13 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,

	pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);

-	ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed.\n");
		goto out;
	}

-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);

	if (initrd != NULL) {
		kbuf.buffer = initrd;
@@ -657,7 +658,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	return ret ? ERR_PTR(ret) : fdt;
 }

-struct kexec_file_ops kexec_elf64_ops = {
+const struct kexec_file_ops kexec_elf64_ops = {
	.probe = elf64_probe,
	.load = elf64_load,
 };
...
@@ -31,52 +31,19 @@

 #define SLAVE_CODE_SIZE	256

-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
+	NULL
 };

 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
 {
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
	/* We don't support crash kernels yet. */
	if (image->type == KEXEC_TYPE_CRASH)
		return -EOPNOTSUPP;

-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-void *arch_kexec_kernel_image_load(struct kimage *image)
-{
-	if (!image->fops || !image->fops->load)
-		return ERR_PTR(-ENOEXEC);
-
-	return image->fops->load(image, image->kernel_buf,
-				 image->kernel_buf_len, image->initrd_buf,
-				 image->initrd_buf_len, image->cmdline_buf,
-				 image->cmdline_buf_len);
-}
-
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
+	return kexec_image_probe_default(image, buf, buf_len);
 }

 /**
...
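Note: the probe/load/cleanup boilerplate removed here (and in the x86 hunk further down) is what the new generic helpers replace; the kernel/kexec_file.c side of the series is collapsed on this page. As a rough sketch only, reconstructed from the removed per-arch loops and the declarations added to include/linux/kexec.h, kexec_image_probe_default() presumably walks the NULL-terminated loader table along these lines:

int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	/* Try each registered loader's ->probe() until one accepts the image. */
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			break;
		}
	}

	return ret;
}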
@@ -220,6 +220,8 @@ static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
...
@@ -160,6 +160,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
...
@@ -193,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
	return 1;
 }

+/*
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
 {
...
@@ -2008,6 +2008,9 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to list of segments as
	  accepted by previous system call.

+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config KEXEC_VERIFY_SIG
	bool "Verify kernel signature during kexec_file_load() syscall"
	depends on KEXEC_FILE
...
@@ -2,6 +2,6 @@
 #ifndef _ASM_KEXEC_BZIMAGE64_H
 #define _ASM_KEXEC_BZIMAGE64_H

-extern struct kexec_file_ops kexec_bzImage64_ops;
+extern const struct kexec_file_ops kexec_bzImage64_ops;

 #endif  /* _ASM_KEXE_BZIMAGE64_H */
This diff is collapsed.
@@ -334,7 +334,6 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned long setup_header_size, params_cmdline_sz;
	struct boot_params *params;
	unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
-	unsigned long purgatory_load_addr;
	struct bzimage64_data *ldata;
	struct kexec_entry64_regs regs64;
	void *stack;
@@ -342,6 +341,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
	struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
				  .top_down = true };
+	struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
+				  .buf_max = ULONG_MAX, .top_down = true };

	header = (struct setup_header *)(kernel + setup_hdr_offset);
	setup_sects = header->setup_sects;
@@ -379,14 +380,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	 * Load purgatory. For 64bit entry point, purgatory code can be
	 * anywhere.
	 */
-	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed\n");
		return ERR_PTR(ret);
	}

-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);

	/*
@@ -538,7 +538,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
 }
 #endif

-struct kexec_file_ops kexec_bzImage64_ops = {
+const struct kexec_file_ops kexec_bzImage64_ops = {
	.probe = bzImage64_probe,
	.load = bzImage64_load,
	.cleanup = bzImage64_cleanup,
...
@@ -30,8 +30,9 @@
 #include <asm/set_memory.h>

 #ifdef CONFIG_KEXEC_FILE
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
+		NULL
 };
 #endif
@@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void)
 /* arch-dependent functionality related to kexec file-based syscall */

 #ifdef CONFIG_KEXEC_FILE
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-				  unsigned long buf_len)
-{
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
 void *arch_kexec_kernel_image_load(struct kimage *image)
 {
	vfree(image->arch.elf_headers);
@@ -399,88 +379,53 @@ void *arch_kexec_kernel_image_load(struct kimage *image)
				 image->cmdline_buf_len);
 }

-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
-}
-
-#ifdef CONFIG_KEXEC_VERIFY_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
-				 unsigned long kernel_len)
-{
-	if (!image->fops || !image->fops->verify_sig) {
-		pr_debug("kernel loader does not support signature verification.");
-		return -EKEYREJECTED;
-	}
-
-	return image->fops->verify_sig(kernel, kernel_len);
-}
-#endif
-
 /*
  * Apply purgatory relocations.
  *
- * ehdr: Pointer to elf headers
- * sechdrs: Pointer to section headers.
- * relsec: section index of SHT_RELA section.
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtabsec:	Corresponding symtab.
  *
  * TODO: Some of the code belongs to generic code. Move that in kexec.c.
  */
-int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
-				     Elf64_Shdr *sechdrs, unsigned int relsec)
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section, const Elf_Shdr *relsec,
				     const Elf_Shdr *symtabsec)
 {
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
-	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;
+	const Elf_Shdr *sechdrs;

-	/*
-	 * ->sh_offset has been modified to keep the pointer to section
-	 * contents in memory
-	 */
-	rel = (void *)sechdrs[relsec].sh_offset;
-
-	/* Section to which relocations apply */
-	section = &sechdrs[sechdrs[relsec].sh_info];
-
-	pr_debug("Applying relocate section %u to %u\n", relsec,
-		 sechdrs[relsec].sh_info);
-
-	/* Associated symbol table */
-	symtabsec = &sechdrs[sechdrs[relsec].sh_link];
-
-	/* String table */
-	if (symtabsec->sh_link >= ehdr->e_shnum) {
-		/* Invalid strtab section number */
-		pr_err("Invalid string table section index %d\n",
-		       symtabsec->sh_link);
-		return -ENOEXEC;
-	}
-
-	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;
-
-	/* section header string table */
-	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
+	/* String & section header string table */
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+	strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
+	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
+
+	rel = (void *)pi->ehdr + relsec->sh_offset;
+
+	pr_debug("Applying relocate section %s to %u\n",
+		 shstrtab + relsec->sh_name, relsec->sh_info);

-	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+	for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains byte offset from beginning
		 * of section to the storage unit affected.
		 *
-		 * This is location to update (->sh_offset). This is temporary
-		 * buffer where section is currently loaded. This will finally
-		 * be loaded to a different address later, pointed to by
+		 * This is location to update. This is temporary buffer
+		 * where section is currently loaded. This will finally be
+		 * loaded to a different address later, pointed to by
		 * ->sh_addr. kexec takes care of moving it
		 *  (kexec_load_segment()).
		 */
-		location = (void *)(section->sh_offset + rel[i].r_offset);
+		location = pi->purgatory_buf;
+		location += section->sh_offset;
+		location += rel[i].r_offset;

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;
@@ -491,8 +436,8 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
		 * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
		 * these respectively.
		 */
-		sym = (Elf64_Sym *)symtabsec->sh_offset +
-				ELF64_R_SYM(rel[i].r_info);
+		sym = (void *)pi->ehdr + symtabsec->sh_offset;
+		sym += ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
@@ -515,12 +460,12 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
-		else if (sym->st_shndx >= ehdr->e_shnum) {
+		else if (sym->st_shndx >= pi->ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
-			sec_base = sechdrs[sym->st_shndx].sh_addr;
+			sec_base = pi->sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
...
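The hunk stops before the resolved value is written out; that tail of the loop is not shown here. Purely as an illustration of the location/address split the comment above describes (the real switch covers more relocation types and checks for overflow), the remainder behaves roughly like:

		/* 'location' points into the temporary purgatory_buf being
		 * patched now; 'address' is where the section will sit when
		 * purgatory actually runs. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_PC32:
			*(u32 *)location = value - address;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}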
@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))

+$(obj)/sha256.o: $(srctree)/lib/sha256.c
+	$(call if_changed_rule,cc_o_c)
+
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
...
@@ -11,9 +11,9 @@
  */

 #include <linux/bug.h>
+#include <linux/sha256.h>
 #include <asm/purgatory.h>

-#include "sha256.h"
 #include "../boot/string.h"

 unsigned long purgatory_backup_dest __section(.kexec-purgatory);
...
@@ -10,4 +10,16 @@
  * Version 2. See the file COPYING for more details.
  */

+#include <linux/types.h>
+
 #include "../boot/string.c"
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+	return __builtin_memcpy(dst, src, len);
+}
+
+void *memset(void *dst, int c, size_t len)
+{
+	return __builtin_memset(dst, c, len);
+}
@@ -15,6 +15,7 @@
 #include <linux/stat.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/printk.h>
 #include <linux/mount.h>
@@ -217,6 +218,26 @@ void proc_free_inum(unsigned int inum)
	ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
 }

+static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0)
+		return 0; /* revalidate */
+	return 1;
+}
+
+static int proc_misc_d_delete(const struct dentry *dentry)
+{
+	return atomic_read(&PDE(d_inode(dentry))->in_use) < 0;
+}
+
+static const struct dentry_operations proc_misc_dentry_ops = {
+	.d_revalidate	= proc_misc_d_revalidate,
+	.d_delete	= proc_misc_d_delete,
+};
+
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
@@ -234,7 +255,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
		inode = proc_get_inode(dir->i_sb, de);
		if (!inode)
			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &simple_dentry_operations);
+		d_set_d_op(dentry, &proc_misc_dentry_ops);
		d_add(dentry, inode);
		return NULL;
	}
...
@@ -99,21 +99,25 @@ struct compat_kexec_segment {
 #ifdef CONFIG_KEXEC_FILE
 struct purgatory_info {
-	/* Pointer to elf header of read only purgatory */
-	Elf_Ehdr *ehdr;
-
-	/* Pointer to purgatory sechdrs which are modifiable */
+	/*
+	 * Pointer to elf header at the beginning of kexec_purgatory.
+	 * Note: kexec_purgatory is read only
+	 */
+	const Elf_Ehdr *ehdr;
+
+	/*
+	 * Temporary, modifiable buffer for sechdrs used for relocation.
+	 * This memory can be freed post image load.
+	 */
	Elf_Shdr *sechdrs;
+
	/*
-	 * Temporary buffer location where purgatory is loaded and relocated
-	 * This memory can be freed post image load
+	 * Temporary, modifiable buffer for stripped purgatory used for
+	 * relocation. This memory can be freed post image load.
	 */
	void *purgatory_buf;
-
-	/* Address where purgatory is finally loaded and is executed from */
-	unsigned long purgatory_load_addr;
 };

+struct kimage;
+
 typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
 typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
			     unsigned long kernel_len, char *initrd,
@@ -135,6 +139,11 @@ struct kexec_file_ops {
 #endif
 };

+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+			      unsigned long buf_len);
+
 /**
  * struct kexec_buf - parameters for finding a place for a buffer in memory
  * @image:	kexec image in which memory to search.
@@ -159,10 +168,44 @@ struct kexec_buf {
	bool top_down;
 };

+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+				   void *buf, unsigned int size,
+				   bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+					    Elf_Shdr *section,
+					    const Elf_Shdr *relsec,
+					    const Elf_Shdr *symtab);
+int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
+					Elf_Shdr *section,
+					const Elf_Shdr *relsec,
+					const Elf_Shdr *symtab);
+
 int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *));
 extern int kexec_add_buffer(struct kexec_buf *kbuf);
 int kexec_locate_mem_hole(struct kexec_buf *kbuf);

+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN   4096
+
+struct crash_mem_range {
+	u64 start, end;
+};
+
+struct crash_mem {
+	unsigned int max_nr_ranges;
+	unsigned int nr_ranges;
+	struct crash_mem_range ranges[0];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+				   unsigned long long mstart,
+				   unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+				       void **addr, unsigned long *sz);
 #endif /* CONFIG_KEXEC_FILE */

 struct kimage {
@@ -209,7 +252,7 @@ struct kimage {
	unsigned long cmdline_buf_len;

	/* File operations provided by image loader */
-	struct kexec_file_ops *fops;
+	const struct kexec_file_ops *fops;

	/* Image loader handling the kernel can store a pointer here */
	void *image_loader_data;
@@ -226,14 +269,6 @@ extern void machine_kexec_cleanup(struct kimage *image);
 extern int kernel_kexec(void);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
						unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
-				unsigned long max, int top_down,
-				unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
-					  const char *name, void *buf,
-					  unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
-					     const char *name);
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
@@ -273,16 +308,6 @@ int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);

-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-					 unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
-					unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
-					    Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-					unsigned int relsec);
-
 void arch_kexec_protect_crashkres(void);
 void arch_kexec_unprotect_crashkres(void);
...
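The crash_mem helpers declared above (crash_exclude_mem_range(), crash_prepare_elf64_headers()) are the pieces lifted out of the x86 crash code into generic code. A rough sketch of how an architecture might drive them when building the crashkernel's ELF core headers; nr_ram_ranges and the range-filling step are illustrative assumptions, only the declared API is taken from the hunk:

	struct crash_mem *cmem;
	void *headers;
	unsigned long headers_sz;
	int ret;

	cmem = vzalloc(sizeof(*cmem) +
		       nr_ram_ranges * sizeof(struct crash_mem_range));
	if (!cmem)
		return -ENOMEM;
	cmem->max_nr_ranges = nr_ram_ranges;

	/* ... fill cmem->ranges[] / cmem->nr_ranges from the System RAM map ... */

	/* Drop the crashkernel reservation itself from the dump ranges. */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, 1 /* kernel_map */,
						  &headers, &headers_sz);
	vfree(cmem);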
@@ -13,6 +13,15 @@
 #include <linux/types.h>
 #include <crypto/sha.h>

+/*
+ * Stand-alone implementation of the SHA256 algorithm. It is designed to
+ * have as little dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should use the implementation in
+ * crypto/.
+ *
+ * For details see lib/sha256.c
+ */
+
 extern int sha256_init(struct sha256_state *sctx);
 extern int sha256_update(struct sha256_state *sctx, const u8 *input,
			 unsigned int length);
...
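With the SHA-256 code now shared from lib/ through this header, the purgatory and kernel/kexec_file.c use one implementation. A minimal usage sketch; sha256_final() belongs to the same API but sits outside the hunk above, so its (sctx, out) prototype is an assumption here:

	#include <linux/sha256.h>
	#include <crypto/sha.h>		/* struct sha256_state, SHA256_DIGEST_SIZE */

	static void sha256_of_buffer(const u8 *buf, unsigned int len,
				     u8 digest[SHA256_DIGEST_SIZE])
	{
		struct sha256_state sctx;

		sha256_init(&sctx);
		sha256_update(&sctx, buf, len);
		sha256_final(&sctx, digest);	/* assumed prototype */
	}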
@@ -225,6 +225,12 @@ static int __shm_open(struct vm_area_struct *vma)
	if (IS_ERR(shp))
		return PTR_ERR(shp);

+	if (shp->shm_file != sfd->file) {
+		/* ID was reused */
+		shm_unlock(shp);
+		return -EINVAL;
+	}
+
	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
@@ -455,8 +461,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
	int ret;

	/*
-	 * In case of remap_file_pages() emulation, the file can represent
-	 * removed IPC ID: propogate shm_lock() error to caller.
+	 * In case of remap_file_pages() emulation, the file can represent an
+	 * IPC ID that was removed, and possibly even reused by another shm
+	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
@@ -480,6 +487,7 @@ static int shm_release(struct inode *ino, struct file *file)
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
+	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
@@ -1445,7 +1453,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
-	sfd->file = shp->shm_file;
+	/*
+	 * We need to take a reference to the real shm file to prevent the
+	 * pointer from becoming stale in cases where the lifetime of the outer
+	 * file extends beyond that of the shm segment. It's not usually
+	 * possible, but it can happen during remap_file_pages() emulation as
+	 * that unmaps the memory, then does ->mmap() via file reference only.
+	 * We'll deny the ->mmap() if the shm segment was since removed, but to
+	 * detect shm ID reuse we need to compare the file pointers.
+	 */
+	sfd->file = get_file(shp->shm_file);
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
...
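For context, the only way to reach the window these __shm_open()/get_file() changes close is through remap_file_pages() emulation. A rough userspace illustration of the sequence the comments describe (error handling omitted; this is a scenario sketch, not a reproducer taken from the patch):

	#define _GNU_SOURCE
	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <sys/mman.h>

	int main(void)
	{
		int id = shmget(IPC_PRIVATE, 2 * 4096, IPC_CREAT | 0600);
		char *p = shmat(id, NULL, 0);

		/* Mark the segment for removal while the mapping is still live. */
		shmctl(id, IPC_RMID, NULL);

		/*
		 * remap_file_pages() emulation unmaps the range and re-mmaps it
		 * through the file pointer kept in shm_file_data; the kernel must
		 * now refuse this if the IPC ID was removed (and maybe reused)
		 * instead of silently attaching to a different segment.
		 */
		remap_file_pages(p, 4096, 0, 1, 0);

		shmdt(p);
		return 0;
	}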
@@ -454,6 +454,7 @@ static int __init crash_save_vmcoreinfo_init(void)
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
+	VMCOREINFO_NUMBER(PG_swapbacked);
	VMCOREINFO_NUMBER(PG_slab);
 #ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
...
This diff is collapsed.
@@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
		alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
		alloc.end = alloc.start + size - 1;
-		if (resource_contains(&avail, &alloc)) {
+		if (alloc.start <= alloc.end &&
+		    resource_contains(&avail, &alloc)) {
			new->start = alloc.start;
			new->end = alloc.end;
			return 0;
...
@@ -16,9 +16,9 @@
  */

 #include <linux/bitops.h>
+#include <linux/sha256.h>
+#include <linux/string.h>
 #include <asm/byteorder.h>
-#include "sha256.h"
-#include "../boot/string.h"

 static inline u32 Ch(u32 x, u32 y, u32 z)
 {
...
@@ -2719,7 +2719,6 @@ int filemap_page_mkwrite(struct vm_fault *vmf)
	sb_end_pagefault(inode->i_sb);
	return ret;
 }
-EXPORT_SYMBOL(filemap_page_mkwrite);

 const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
@@ -2750,6 +2749,10 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
	return generic_file_mmap(file, vma);
 }
 #else
+int filemap_page_mkwrite(struct vm_fault *vmf)
+{
+	return -ENOSYS;
+}
 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
	return -ENOSYS;
@@ -2760,6 +2763,7 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
 }
 #endif /* CONFIG_MMU */

+EXPORT_SYMBOL(filemap_page_mkwrite);
 EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
...
@@ -1740,7 +1740,9 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
 /*
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
- * the regular GUP. It will only return non-negative values.
+ * the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
@@ -1806,9 +1808,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

+	if (nr_pages <= 0)
+		return 0;
+
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
-		return 0;
+		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
...
@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
	struct page **pages;

	nr_pages = gup->size / PAGE_SIZE;
-	pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+	pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

@@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
		}

		nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
+		if (nr <= 0)
+			break;
		i += nr;
	}
	end_time = ktime_get();
...
@@ -4086,7 +4086,8 @@ static void cache_reap(struct work_struct *w)
	next_reap_node();
 out:
	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+	schedule_delayed_work_on(smp_processor_id(), work,
+				round_jiffies_relative(REAPTIMEOUT_AC));
 }

 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
...
@@ -297,8 +297,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
- * If the architecture not support this function, simply return with no
- * page pinned
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
  */
 int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
...