Commit 4b692e86 authored by Arnd Bergmann, committed by Linus Torvalds

kexec: move locking into do_kexec_load

Patch series "compat: remove compat_alloc_user_space", v5.

Going through compat_alloc_user_space() to convert indirect system call
arguments tends to add complexity compared to handling the native and
compat logic in the same code.
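
For kexec_load(), that pattern looked roughly like the sketch below: the compat
entry point staged a native-format copy of the segment array in user space with
compat_alloc_user_space(), just so the shared loader could read it back with
copy_from_user(). (Condensed from the pre-series compat handler; argument
validation is elided.)

COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i;

	/* Stage a native-format copy of the segment array in user space. */
	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		/* Read the 32-bit layout... */
		if (copy_from_user(&in, &segments[i], sizeof(in)))
			return -EFAULT;

		/* ...widen the fields... */
		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		/* ...and write them back to user space for the native path. */
		if (copy_to_user(&ksegments[i], &out, sizeof(out)))
			return -EFAULT;
	}

	return do_kexec_load(entry, nr_segments, ksegments, flags);
}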

This patch (of 6):

The locking is the same between the native and compat version of
sys_kexec_load(), so it can be done in the common implementation to reduce
duplication.
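
The resulting shape, condensed (image setup and the error paths elided), is a
single lock site inside the shared helper:

static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
			 struct kexec_segment __user *segments,
			 unsigned long flags)
{
	int ret = 0;

	/* One trylock now covers both the native and the compat caller. */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	/* ... load or uninstall the image, setting ret on failure ... */

	mutex_unlock(&kexec_mutex);
	return ret;
}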

Link: https://lkml.kernel.org/r/20210727144859.4150043-1-arnd@kernel.org
Link: https://lkml.kernel.org/r/20210727144859.4150043-2-arnd@kernel.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Co-developed-by: Eric Biederman <ebiederm@xmission.com>
Co-developed-by: Christoph Hellwig <hch@infradead.org>
Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Feng Tang <feng.tang@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 213ecb31
kernel/kexec.c

@@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	unsigned long i;
 	int ret;
 
+	/*
+	 * Because we write directly to the reserved memory region when loading
+	 * crash kernels we need a mutex here to prevent multiple crash kernels
+	 * from attempting to load simultaneously, and to prevent a crash kernel
+	 * from loading over the top of a in use crash kernel.
+	 *
+	 * KISS: always take the mutex.
+	 */
+	if (!mutex_trylock(&kexec_mutex))
+		return -EBUSY;
+
 	if (flags & KEXEC_ON_CRASH) {
 		dest_image = &kexec_crash_image;
 		if (kexec_crash_image)
@@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	if (nr_segments == 0) {
 		/* Uninstall image */
 		kimage_free(xchg(dest_image, NULL));
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	}
 	if (flags & KEXEC_ON_CRASH) {
 		/*
@@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
 	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	if (flags & KEXEC_PRESERVE_CONTEXT)
 		image->preserve_context = 1;
@@ -171,6 +183,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 		arch_kexec_protect_crashkres();
 
 	kimage_free(image);
+out_unlock:
+	mutex_unlock(&kexec_mutex);
 	return ret;
 }
 
@@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 		return -EINVAL;
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, segments, flags);
 
-	mutex_unlock(&kexec_mutex);
-
 	return result;
 }
 
@@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 			return -EFAULT;
 	}
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
-	mutex_unlock(&kexec_mutex);
-
 	return result;
 }
 #endif