Commit 8382c668 authored by Sean Christopherson, committed by Borislav Petkov

x86/vdso: Add support for exception fixup in vDSO functions

Signals are a horrid little mechanism.  They are especially nasty in
multi-threaded environments because signal state like handlers is global
across the entire process.  But, signals are basically the only way that
userspace can “gracefully” handle and recover from exceptions.

The kernel generally does not like exceptions to occur during execution.
But, exceptions are a fact of life and must be handled in some
circumstances.  The kernel handles them by keeping a list of individual
instructions which may cause exceptions.  Instead of truly handling the
exception and returning to the instruction that caused it, the kernel
instead restarts execution at a *different* instruction.  This makes it
obvious to that thread of execution that the exception occurred and lets
*that* code handle the exception instead of the generic exception handler.

This is not dissimilar to the try/catch exception mechanisms that some
programming languages have, but applied *very* surgically to single
instructions.  It effectively changes the visible architecture of the
instruction.
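
The sketch below is a simplified illustration of that in-kernel mechanism
(not code from this patch; the names are made up, and the real x86
exception_table_entry uses IP-relative offsets plus a handler field): a table
pairs each instruction that is allowed to fault with a fixup address, and the
trap handler rewrites the saved instruction pointer on a match instead of
taking the fatal path.

#include <linux/types.h>
#include <asm/ptrace.h>

/* Illustrative sketch only: one entry per instruction allowed to fault. */
struct ex_entry_sketch {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to resume execution at instead    */
};

/* Hypothetical lookup called from the trap handler. */
static bool fixup_exception_sketch(struct pt_regs *regs,
				   const struct ex_entry_sketch *table,
				   unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (regs->ip == table[i].insn) {
			regs->ip = table[i].fixup;	/* "catch" the exception */
			return true;
		}
	}
	return false;	/* no entry: take the normal fatal/signal path */
}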

Problem
=======

SGX generates a lot of signals, and the code to enter and exit enclaves and
muck with signal handling is truly horrid.  At the same time, an approach
like kernel exception fixup cannot easily be applied to userspace
instructions because it changes the visible instruction architecture.

Solution
========

The vDSO is a special page of kernel-provided instructions that run in
userspace.  Any userspace calling into the vDSO knows that it is special.
This allows the kernel a place to legitimately rewrite the user/kernel
contract and change instruction behavior.

Add support for fixing up exceptions that occur while executing in the
vDSO.  This replaces what could traditionally only be done with signal
handling.

This new mechanism will be used to replace previously direct use of SGX
instructions by userspace.

Just introduce the vDSO infrastructure.  Later patches will actually
replace signal generation with vDSO exception fixup.
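
For contrast, below is an illustrative sketch (not taken from any real SGX
runtime; all names are made up) of the userspace pattern being replaced:
install a process-wide SIGSEGV/SIGBUS handler and jump out of it when the
fault hits.  With vDSO exception fixup, a protected vDSO instruction instead
resumes at a fixup target with the trap number, error code and fault address
in the first three argument registers (see extable.c below), so the vDSO
function can simply report the fault to its caller.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

/* Process-wide state: exactly the kind of global that makes signals nasty
 * in multi-threaded programs -- every thread shares the handler. */
static sigjmp_buf fault_jmp;

static void fault_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* No sane way to tell here whether the fault came from an enclave,
	 * a library, or a plain application bug. */
	siglongjmp(fault_jmp, 1);
}

static int run_guarded(void (*enter)(void))
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = fault_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);	/* global, racy vs. other users */
	sigaction(SIGBUS, &sa, NULL);

	if (sigsetjmp(fault_jmp, 1)) {
		fprintf(stderr, "recovered from a fault via the signal path\n");
		return -1;
	}

	enter();	/* e.g. code that executes a faulting instruction */
	return 0;
}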
Suggested-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-17-jarkko@kernel.org
parent c82c6186
@@ -29,7 +29,7 @@ vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
 vobjs32-y += vdso32/vclock_gettime.o

 # files to link into kernel
-obj-y += vma.o
+obj-y += vma.o extable.o
 KASAN_SANITIZE_vma.o := y
 UBSAN_SANITIZE_vma.o := y
 KCSAN_SANITIZE_vma.o := y
@@ -128,8 +128,8 @@ $(obj)/%-x32.o: $(obj)/%.o FORCE
 targets += vdsox32.lds $(vobjx32s-y)

-$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

 $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/mm.h>
#include <asm/current.h>
#include <asm/traps.h>
#include <asm/vdso.h>

struct vdso_exception_table_entry {
	int insn, fixup;
};

bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
			  unsigned long error_code, unsigned long fault_addr)
{
	const struct vdso_image *image = current->mm->context.vdso_image;
	const struct vdso_exception_table_entry *extable;
	unsigned int nr_entries, i;
	unsigned long base;

	/*
	 * Do not attempt to fixup #DB or #BP.  It's impossible to identify
	 * whether or not a #DB/#BP originated from within an SGX enclave and
	 * SGX enclaves are currently the only use case for vDSO fixup.
	 */
	if (trapnr == X86_TRAP_DB || trapnr == X86_TRAP_BP)
		return false;

	if (!current->mm->context.vdso)
		return false;

	/* Entries hold offsets relative to the start of the extable. */
	base = (unsigned long)current->mm->context.vdso + image->extable_base;
	nr_entries = image->extable_len / (sizeof(*extable));
	extable = image->extable;

	for (i = 0; i < nr_entries; i++) {
		if (regs->ip == base + extable[i].insn) {
			/*
			 * Resume at the fixup target and hand the exception
			 * details to it in the first three argument
			 * registers (RDI, RSI, RDX).
			 */
			regs->ip = base + extable[i].fixup;
			regs->di = trapnr;
			regs->si = error_code;
			regs->dx = fault_addr;
			return true;
		}
	}

	return false;
}
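
Nothing in this patch calls the helper yet; hooking it into the trap and
page-fault handlers happens in later patches of the series.  A hedged sketch
of what such a call site could look like (the exact placement and arguments
may differ in the real series):

/* Illustrative call-site sketch -- the actual wiring into the trap and
 * page-fault handlers lands in a later patch of this series. */
static void handle_user_fault_sketch(struct pt_regs *regs, int trapnr,
				     unsigned long error_code,
				     unsigned long fault_addr)
{
	/* Only user-mode faults can originate in the vDSO. */
	if (user_mode(regs) &&
	    fixup_vdso_exception(regs, trapnr, error_code, fault_addr))
		return;	/* IP now points at the fixup target; no signal sent */

	/* ... otherwise fall back to the usual force_sig_fault() path ... */
}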
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_EXTABLE_H
#define __VDSO_EXTABLE_H

/*
 * Inject exception fixup for vDSO code.  Unlike normal exception fixup,
 * the vDSO uses a dedicated handler, and the entry addresses are relative
 * to the start of the exception table, not to each individual entry.
 */
#ifdef __ASSEMBLY__
#define _ASM_VDSO_EXTABLE_HANDLE(from, to)	\
	ASM_VDSO_EXTABLE_HANDLE from to

.macro ASM_VDSO_EXTABLE_HANDLE from:req to:req
	.pushsection __ex_table, "a"
	.long (\from) - __ex_table
	.long (\to) - __ex_table
	.popsection
.endm
#else
#define _ASM_VDSO_EXTABLE_HANDLE(from, to)	\
	".pushsection __ex_table, \"a\"\n"	\
	".long (" #from ") - __ex_table\n"	\
	".long (" #to ") - __ex_table\n"	\
	".popsection\n"
#endif

#endif /* __VDSO_EXTABLE_H */
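
The macros likewise have no user yet; the SGX enclave-entry code added later
in the series is the intended consumer.  Purely as an illustration of the
C/inline-asm flavor (a hypothetical function, not part of the patch), a vDSO
routine could mark a single faulting instruction like this:

/*
 * Hypothetical illustration only -- the real consumer added later in the
 * series is hand-written vDSO assembly.  On a fault, execution resumes at
 * label 2 with the trap number, error code and fault address placed in
 * RDI, RSI and RDX by fixup_vdso_exception().
 */
#include "extable.h"

static long vdso_load_sketch(const long *ptr, long *val)
{
	register long trapnr asm("rdi") = 0;

	asm volatile("1:	mov (%[ptr]), %[out]\n"	/* may fault */
		     "2:\n"				/* fixup target */
		     _ASM_VDSO_EXTABLE_HANDLE(1b, 2b)
		     : [out] "=r" (*val), "+r" (trapnr)
		     : [ptr] "r" (ptr)
		     : "rsi", "rdx");	/* written by the fixup on a fault */

	/* 0 on success; X86_TRAP_* if the load faulted (then *val is undefined). */
	return trapnr;
}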
@@ -75,11 +75,18 @@ SECTIONS
	 * stuff that isn't used at runtime in between.
	 */
-	.text		: { *(.text*) }			:text	=0x90909090,
+	.text		: {
+		*(.text*)
+		*(.fixup)
+	}						:text	=0x90909090,
+
 	.altinstructions	: { *(.altinstructions) }	:text
 	.altinstr_replacement	: { *(.altinstr_replacement) }	:text

+	__ex_table		: { *(__ex_table) }		:text
+
 	/DISCARD/ : {
 		*(.discard)
 		*(.discard.*)
@@ -5,6 +5,41 @@
  * are built for 32-bit userspace.
  */

+static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i % 10 == 0)
+			fprintf(outfile, "\n\t");
+		fprintf(outfile, "0x%02X, ", (int)(data)[i]);
+	}
+}
+
+/*
+ * Extract a section from the input data into a standalone blob.  Used to
+ * capture kernel-only data that needs to persist indefinitely, e.g. the
+ * exception fixup tables, but only in the kernel, i.e. the section can
+ * be stripped from the final vDSO image.
+ */
+static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
+			      FILE *outfile, ELF(Shdr) *sec, const char *name)
+{
+	unsigned long offset;
+	size_t len;
+
+	offset = (unsigned long)GET_LE(&sec->sh_offset);
+	len = (size_t)GET_LE(&sec->sh_size);
+
+	if (offset + len > data_len)
+		fail("section to extract overruns input data");
+
+	fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
+	BITSFUNC(copy)(outfile, data + offset, len);
+	fprintf(outfile, "\n};\n\n");
+}
+
 static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *image_name)
@@ -15,7 +50,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
 	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
 	unsigned long i, syms_nr;
 	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
-		*alt_sec = NULL;
+		*alt_sec = NULL, *extable_sec = NULL;
 	ELF(Dyn) *dyn = 0, *dyn_end = 0;
 	const char *secstrings;
 	INT_BITS syms[NSYMS] = {};
@@ -77,6 +112,8 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
 		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
+		if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table"))
+			extable_sec = sh;
 	}

 	if (!symtab_hdr)
@@ -155,6 +192,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			(int)((unsigned char *)stripped_addr)[i]);
 	}
 	fprintf(outfile, "\n};\n\n");
+	if (extable_sec)
+		BITSFUNC(extract)(raw_addr, raw_len, outfile,
+				  extable_sec, "extable");

 	fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
 	fprintf(outfile, "\t.data = raw_data,\n");
@@ -165,6 +205,14 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
 	}
+	if (extable_sec) {
+		fprintf(outfile, "\t.extable_base = %lu,\n",
+			(unsigned long)GET_LE(&extable_sec->sh_offset));
+		fprintf(outfile, "\t.extable_len = %lu,\n",
+			(unsigned long)GET_LE(&extable_sec->sh_size));
+		fprintf(outfile, "\t.extable = extable,\n");
+	}

 	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
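
For reference, the net effect on the generated vdso-image-*.c files is
roughly the excerpt below; the symbol name "extable" and the struct fields
are what the fprintf() calls above emit, while the byte values and offsets
are invented for illustration.

/* Illustrative excerpt of a generated vdso-image-*.c (byte values and
 * offsets invented for the example): */
static const unsigned char extable[16] = {
	0x3C, 0x25, 0x00, 0x00, 0x6A, 0x25, 0x00, 0x00, 0x9E, 0x25,
	0x00, 0x00, 0xC4, 0x25, 0x00, 0x00,
};

const struct vdso_image vdso_image_64 = {
	.data = raw_data,
	/* ... */
	.extable_base = 9472,	/* file offset of __ex_table in raw_data */
	.extable_len = 16,
	.extable = extable,
	/* ... */
};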
@@ -15,6 +15,8 @@ struct vdso_image {
 	unsigned long size;   /* Always a multiple of PAGE_SIZE */

 	unsigned long alt, alt_len;
+	unsigned long extable_base, extable_len;
+	const void *extable;

 	long sym_vvar_start;  /* Negative offset to the vvar area */

@@ -45,6 +47,9 @@ extern void __init init_vdso_image(const struct vdso_image *image);

 extern int map_vdso_once(const struct vdso_image *image, unsigned long addr);

+extern bool fixup_vdso_exception(struct pt_regs *regs, int trapnr,
+				 unsigned long error_code,
+				 unsigned long fault_addr);

 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_VDSO_H */