Commit 70d87cbb authored by H. Peter Anvin, committed by Ben Hutchings

x86, espfix: Make it possible to disable 16-bit support

commit 34273f41 upstream.

Embedded systems, which may be very memory-size-sensitive, are
extremely unlikely to ever encounter any 16-bit software, so make it
a CONFIG_EXPERT option to turn off support for any 16-bit software
whatsoever.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent da22646d
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -896,14 +896,27 @@ config VM86
 	default y
 	depends on X86_32
 	---help---
-	  This option is required by programs like DOSEMU to run 16-bit legacy
-	  code on X86 processors. It also may be needed by software like
-	  XFree86 to initialize some video cards via BIOS. Disabling this
-	  option saves about 6k.
+	  This option is required by programs like DOSEMU to run
+	  16-bit real mode legacy code on x86 processors. It also may
+	  be needed by software like XFree86 to initialize some video
+	  cards via BIOS. Disabling this option saves about 6K.
+
+config X86_16BIT
+	bool "Enable support for 16-bit segments" if EXPERT
+	default y
+	---help---
+	  This option is required by programs like Wine to run 16-bit
+	  protected mode legacy code on x86 processors.  Disabling
+	  this option saves about 300 bytes on i386, or around 6K text
+	  plus 16K runtime memory on x86-64,
+
+config X86_ESPFIX32
+	def_bool y
+	depends on X86_16BIT && X86_32
 
 config X86_ESPFIX64
 	def_bool y
-	depends on X86_64
+	depends on X86_16BIT && X86_64
 
 config TOSHIBA
 	tristate "Toshiba Laptop support"
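Editorial note: with CONFIG_EXPERT unset the new prompt stays hidden and X86_16BIT defaults to y, so only expert configurations (`# CONFIG_X86_16BIT is not set`) lose the espfix code paths. The new symbols are then consumed in two styles below: the entry code uses preprocessor #ifdef, the only form usable in .S files, while ldt.c uses IS_ENABLED(), which folds to a constant 0 or 1 at compile time. A minimal kernel-style C sketch of the IS_ENABLED() pattern; the function is illustrative, not part of the patch:

```c
#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/errno.h>

/* Illustrative only: mirrors the shape of the write_ldt() guard. */
static int reject_16bit_descriptor(int seg_32bit)
{
	/*
	 * IS_ENABLED(CONFIG_X86_16BIT) expands to 1 for =y and 0 for
	 * =n, so with 16-bit support enabled this branch is provably
	 * dead and the compiler deletes it; with it disabled the
	 * check still type-checks and compiles as ordinary C.
	 */
	if (!IS_ENABLED(CONFIG_X86_16BIT) && !seg_32bit)
		return -EINVAL;
	return 0;
}
```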
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -527,6 +527,7 @@ syscall_exit:
 restore_all:
 	TRACE_IRQS_IRET
 restore_all_notrace:
+#ifdef CONFIG_X86_ESPFIX32
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
 	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 	# are returning to the kernel.
@@ -537,6 +538,7 @@ restore_all_notrace:
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
+#endif
 restore_nocheck:
 	RESTORE_REGS 4			# skip orig_eax/error_code
 irq_return:
@@ -552,6 +554,7 @@ ENTRY(iret_exc)
 	.long irq_return,iret_exc
 .previous
 
+#ifdef CONFIG_X86_ESPFIX32
 	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
@@ -595,6 +598,7 @@ ldt_ss:
 	lss (%esp), %esp		/* switch to espfix segment */
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
+#endif
 	CFI_ENDPROC
 ENDPROC(system_call)
@@ -766,6 +770,7 @@ ENDPROC(ptregs_clone)
  * the high word of the segment base from the GDT and swiches to the
  * normal stack and adjusts ESP with the matching offset.
  */
+#ifdef CONFIG_X86_ESPFIX32
 	/* fixup the stack */
 	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
@@ -775,8 +780,10 @@ ENDPROC(ptregs_clone)
 	pushl_cfi %eax
 	lss (%esp), %esp		/* switch to the normal stack segment */
 	CFI_ADJUST_CFA_OFFSET -8
+#endif
 .endm
 .macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_ESPFIX32
 	movl %ss, %eax
 	/* see if on espfix stack */
 	cmpw $__ESPFIX_SS, %ax
@@ -787,6 +794,7 @@ ENDPROC(ptregs_clone)
 	/* switch to normal stack */
 	FIXUP_ESPFIX_STACK
 27:
+#endif
 .endm
 
 /*
@@ -1323,11 +1331,13 @@ END(debug)
  */
 ENTRY(nmi)
 	RING0_INT_FRAME
+#ifdef CONFIG_X86_ESPFIX32
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
 	popl_cfi %eax
 	je nmi_espfix_stack
+#endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
 	pushl_cfi %eax
@@ -1367,6 +1377,7 @@ nmi_debug_stack_check:
 	FIX_STACK 24, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
+#ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
 	/* We have a RING0_INT_FRAME here.
 	 *
@@ -1388,6 +1399,7 @@ nmi_espfix_stack:
 	lss 12+4(%esp), %esp		# back to espfix stack
 	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
+#endif
 	CFI_ENDPROC
 END(nmi)
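Editorial note: the block now guarded at restore_all_notrace packs CS into the low byte, SS into the next byte and EFLAGS above them, then compares against (SEGMENT_LDT << 8) | USER_RPL, i.e. "returning to ring 3, not vm86, on an LDT stack segment". A C restatement of that predicate for illustration, with constants from the x86 selector format and an invented helper name:

```c
#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_VM	 0x00020000	/* EFLAGS.VM: vm86 mode */
#define SEGMENT_TI_MASK	 0x04		/* selector bit 2: 1 = LDT */
#define SEGMENT_RPL_MASK 0x03		/* selector bits 0-1: RPL */
#define SEGMENT_LDT	 0x04
#define USER_RPL	 0x03		/* ring 3 */

/* Invented helper restating the ldt_ss dispatch test: the espfix
 * path is taken only for a non-vm86 return to user mode whose stack
 * selector points into the LDT. */
static bool needs_espfix32(uint32_t eflags, uint16_t cs, uint16_t ss)
{
	uint32_t mixed = (eflags & X86_EFLAGS_VM)
		       | ((uint32_t)(ss & SEGMENT_TI_MASK) << 8)
		       | (cs & SEGMENT_RPL_MASK);

	return mixed == ((SEGMENT_LDT << 8) | USER_RPL);
}
```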
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -865,8 +865,10 @@ irq_return:
 	 * Are we returning to a stack segment from the LDT?  Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
+#ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
 	jnz irq_return_ldt
+#endif
 
 irq_return_iret:
 	INTERRUPT_RETURN
@@ -884,6 +886,7 @@ ENTRY(native_iret)
 	.previous
 #endif
 
+#ifdef CONFIG_X86_ESPFIX64
 irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
@@ -907,6 +910,7 @@ irq_return_ldt:
 	movq %rax,%rsp
 	popq_cfi %rax
 	jmp irq_return_iret
+#endif
 
 	.section .fixup,"ax"
 bad_iret:
@@ -980,6 +984,7 @@ END(common_interrupt)
  * modify the stack to make it look like we just entered
  * the #GP handler from user space, similar to bad_iret.
  */
+#ifdef CONFIG_X86_ESPFIX64
 	ALIGN
 __do_double_fault:
 	XCPT_FRAME 1 RDI+8
@@ -1005,6 +1010,9 @@ __do_double_fault:
 	retq
 	CFI_ENDPROC
 END(__do_double_fault)
+#else
+# define __do_double_fault do_double_fault
+#endif
 
 /*
  * End of kprobes section
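Editorial note: the 64-bit test is much cheaper than the 32-bit one. testb $4,(SS-RIP)(%rsp) inspects bit 2 of the saved SS selector, the Table Indicator, which is set exactly when the selector refers to the LDT; the #else branch simply aliases __do_double_fault back to do_double_fault so the IDT setup code needs no change when espfix64 is compiled out. The selector test in C, with an illustrative helper:

```c
#include <stdbool.h>
#include <stdint.h>

/* A segment selector: bits 0-1 are the RPL, bit 2 the Table
 * Indicator (0 = GDT, 1 = LDT), bits 3-15 the descriptor index.
 * The irq_return_ldt branch fires only for LDT selectors, the only
 * way user space can have installed a 16-bit stack segment here. */
static bool ss_from_ldt(uint16_t ss)
{
	return ss & 0x4;	/* the same bit that testb $4,... checks */
}
```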
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -230,6 +230,11 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		}
 	}
 
+	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+
 	fill_ldt(&ldt, &ldt_info);
 	if (oldmode)
 		ldt.avl = 0;
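Editorial note: the effect of the new guard is directly observable from user space. With CONFIG_X86_16BIT=n, asking modify_ldt() for a descriptor with seg_32bit clear now fails with -EINVAL instead of installing a segment the kernel cannot safely return to. A hypothetical probe program, assuming Linux headers on x86:

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc ud;

	memset(&ud, 0, sizeof(ud));
	ud.entry_number = 0;
	ud.limit        = 0xffff;
	ud.seg_32bit    = 0;	/* ask for a 16-bit segment */
	ud.useable      = 1;

	/* modify_ldt(1, ...) writes one LDT entry */
	if (syscall(SYS_modify_ldt, 1, &ud, sizeof(ud)) < 0)
		perror("modify_ldt");	/* EINVAL when X86_16BIT=n */
	else
		puts("16-bit LDT segment installed");
	return 0;
}
```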