Commit 111e7b15 authored by Thomas Gleixner

x86/ioperm: Extend IOPL config to control ioperm() as well

If iopl() is disabled, then providing ioperm() does not make much sense.

Rename the config option and disable/enable both syscalls with it. Guard
the code with #ifdefs where appropriate.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent a24ca997
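
For readers unfamiliar with the two syscalls, here is a minimal userspace sketch of what changes from an application's point of view (not part of this commit; the port number and messages are purely illustrative): on a kernel built with CONFIG_X86_IOPL_IOPERM=n, both calls now fail with ENOSYS.

/*
 * Illustrative userspace probe, not from this patch.  Requires
 * CAP_SYS_RAWIO; port 0x80 (the POST diagnostic port) is an arbitrary
 * example.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
        if (ioperm(0x80, 1, 1) == 0) {
                outb(0, 0x80);          /* direct port access now allowed */
                ioperm(0x80, 1, 0);     /* drop the permission again */
                return 0;
        }

        if (errno == ENOSYS)
                fprintf(stderr, "ioperm()/iopl() compiled out (CONFIG_X86_IOPL_IOPERM=n)\n");
        else
                perror("ioperm");

        /* iopl() is governed by the same config switch. */
        if (iopl(3) < 0 && errno == ENOSYS)
                fprintf(stderr, "iopl() unavailable as well\n");

        return 1;
}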
arch/x86/Kconfig

@@ -1254,10 +1254,13 @@ config X86_VSYSCALL_EMULATION
           Disabling this option saves about 7K of kernel size and
           possibly 4K of additional runtime pagetable memory.
 
-config X86_IOPL_EMULATION
-        bool "IOPL Emulation"
+config X86_IOPL_IOPERM
+        bool "IOPERM and IOPL Emulation"
         default y
         ---help---
+          This enables the ioperm() and iopl() syscalls which are necessary
+          for legacy applications.
+
           Legacy IOPL support is an overbroad mechanism which allows user
           space aside of accessing all 65536 I/O ports also to disable
           interrupts. To gain this access the caller needs CAP_SYS_RAWIO
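
As a side note on the help text above (illustration only, not from this commit): ioperm() grants access to a bounded port range, while iopl(3) is the overbroad variant that opens all 65536 ports at once and, with hardware IOPL, also permits cli/sti. Both require CAP_SYS_RAWIO. A hypothetical sketch:

#include <sys/io.h>

/* Fine-grained: only the legacy parallel-port registers 0x378-0x37a. */
static int grab_parallel_port(void)
{
        return ioperm(0x378, 3, 1);
}

/* Overbroad: all ports, and with real IOPL also interrupt control. */
static int grab_everything(void)
{
        return iopl(3);
}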
arch/x86/include/asm/io_bitmap.h

@@ -15,9 +15,15 @@ struct io_bitmap {
 
 struct task_struct;
 
+#ifdef CONFIG_X86_IOPL_IOPERM
 void io_bitmap_share(struct task_struct *tsk);
 void io_bitmap_exit(void);
 
 void tss_update_io_bitmap(void);
+#else
+static inline void io_bitmap_share(struct task_struct *tsk) { }
+static inline void io_bitmap_exit(void) { }
+static inline void tss_update_io_bitmap(void) { }
+#endif
 
 #endif
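
The empty static inline stubs above follow the usual kernel pattern: call sites need no #ifdef of their own, because with CONFIG_X86_IOPL_IOPERM=n the compiler resolves each call to an empty inline and drops it. A hypothetical caller (not from this patch) for illustration:

#include <asm/io_bitmap.h>

/* Hypothetical thread-teardown helper, shown only to illustrate the pattern. */
static void example_thread_teardown(void)
{
        /*
         * Resolves to the real function with CONFIG_X86_IOPL_IOPERM=y,
         * and to the empty inline (i.e. nothing at all) with =n.
         */
        io_bitmap_exit();
}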
arch/x86/include/asm/processor.h

@@ -340,13 +340,18 @@ struct x86_hw_tss {
         (offsetof(struct tss_struct, io_bitmap.mapall) - \
          offsetof(struct tss_struct, x86_tss))
 
+#ifdef CONFIG_X86_IOPL_IOPERM
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end of the
  * iobitmap. The limit is inclusive, i.e. the last valid byte.
  */
-#define __KERNEL_TSS_LIMIT \
+# define __KERNEL_TSS_LIMIT \
         (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
          sizeof(unsigned long) - 1)
+#else
+# define __KERNEL_TSS_LIMIT \
+        (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
+#endif
 
 /* Base offset outside of TSS_LIMIT so unpriviledged IO causes #GP */
 #define IO_BITMAP_OFFSET_INVALID        (__KERNEL_TSS_LIMIT + 1)

@@ -398,7 +403,9 @@ struct tss_struct {
          */
         struct x86_hw_tss       x86_tss;
 
+#ifdef CONFIG_X86_IOPL_IOPERM
         struct x86_io_bitmap    io_bitmap;
+#endif
 } __aligned(PAGE_SIZE);
 
 DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
arch/x86/include/asm/thread_info.h

@@ -156,8 +156,13 @@ struct thread_info {
 # define _TIF_WORK_CTXSW        (_TIF_WORK_CTXSW_BASE)
 #endif
 
-#define _TIF_WORK_CTXSW_PREV    (_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY | \
+#ifdef CONFIG_X86_IOPL_IOPERM
+# define _TIF_WORK_CTXSW_PREV   (_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY | \
                                  _TIF_IO_BITMAP)
+#else
+# define _TIF_WORK_CTXSW_PREV   (_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY)
+#endif
 #define _TIF_WORK_CTXSW_NEXT    (_TIF_WORK_CTXSW)
 
 #define STACK_WARN              (THREAD_SIZE/8)
arch/x86/kernel/cpu/common.c

@@ -1804,6 +1804,22 @@ static inline void gdt_setup_doublefault_tss(int cpu)
 }
 #endif /* !CONFIG_X86_64 */
 
+static inline void tss_setup_io_bitmap(struct tss_struct *tss)
+{
+        tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+        tss->io_bitmap.prev_max = 0;
+        tss->io_bitmap.prev_sequence = 0;
+        memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
+        /*
+         * Invalidate the extra array entry past the end of the all
+         * permission bitmap as required by the hardware.
+         */
+        tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
+#endif
+}
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT

@@ -1860,15 +1876,7 @@ void cpu_init(void)
         /* Initialize the TSS. */
         tss_setup_ist(tss);
 
-        tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
-        tss->io_bitmap.prev_max = 0;
-        tss->io_bitmap.prev_sequence = 0;
-        memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
-        /*
-         * Invalidate the extra array entry past the end of the all
-         * permission bitmap as required by the hardware.
-         */
-        tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
+        tss_setup_io_bitmap(tss);
 
         set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
         load_TR_desc();
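
The comment about the extra entry past the end of the bitmap refers to a hardware rule: the byte following the I/O permission bitmap must read as all ones, because multi-byte port accesses near the top of the port range make the CPU consult bits beyond the last bitmap byte. A simplified model of that check (illustration only, not kernel code; the buffer is assumed to carry one terminator byte after the IO_BITMAP_BYTES map):

#include <stdbool.h>
#include <stdint.h>

#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS / 8)

/*
 * Simplified model: an access of 'width' bytes starting at 'port' is
 * allowed only if every corresponding bit is clear.  For port 0xffff
 * with width 2 this reads iopb[IO_BITMAP_BYTES], i.e. the terminator
 * byte -- which therefore has to be 0xff so the access faults instead
 * of silently succeeding.
 */
static bool io_access_allowed(const uint8_t iopb[IO_BITMAP_BYTES + 1],
                              unsigned int port, unsigned int width)
{
        for (unsigned int p = port; p < port + width; p++) {
                if (iopb[p / 8] & (1u << (p % 8)))
                        return false;   /* set bit => #GP in hardware */
        }
        return true;
}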
arch/x86/kernel/ioport.c

@@ -14,6 +14,8 @@
 #include <asm/io_bitmap.h>
 #include <asm/desc.h>
 
+#ifdef CONFIG_X86_IOPL_IOPERM
+
 static atomic64_t io_bitmap_sequence;
 
 void io_bitmap_share(struct task_struct *tsk)

@@ -172,13 +174,6 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
         struct thread_struct *t = &current->thread;
         unsigned int old;
 
-        /*
-         * Careful: the IOPL bits in regs->flags are undefined under Xen PV
-         * and changing them has no effect.
-         */
-        if (IS_ENABLED(CONFIG_X86_IOPL_NONE))
-                return -ENOSYS;
-
         if (level > 3)
                 return -EINVAL;

@@ -200,3 +195,20 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
 
         return 0;
 }
+
+#else /* CONFIG_X86_IOPL_IOPERM */
+long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+        return -ENOSYS;
+}
+
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
+{
+        return -ENOSYS;
+}
+
+SYSCALL_DEFINE1(iopl, unsigned int, level)
+{
+        return -ENOSYS;
+}
+#endif
arch/x86/kernel/process.c

@@ -322,6 +322,7 @@ void arch_setup_new_exec(void)
         }
 }
 
+#ifdef CONFIG_X86_IOPL_IOPERM
 static inline void tss_invalidate_io_bitmap(struct tss_struct *tss)
 {
         /*

@@ -409,6 +410,9 @@ void tss_update_io_bitmap(void)
                 tss_invalidate_io_bitmap(tss);
         }
 }
+#else /* CONFIG_X86_IOPL_IOPERM */
+static inline void switch_to_bitmap(unsigned long tifp) { }
+#endif
 
 #ifdef CONFIG_SMP