Commit ae7eb82a authored by Thiago Jung Bauermann, committed by Michael Ellerman

fs/core/vmcore: Move sev_active() reference to x86 arch code

Secure Encrypted Virtualization is an x86-specific feature, so it shouldn't
appear in generic kernel code because it forces non-x86 architectures to
define the sev_active() function, which doesn't make a lot of sense.

To solve this problem, add an x86 elfcorehdr_read() function to override
the generic weak implementation. To do that, it's necessary to make
read_from_oldmem() public so that it can be used outside of vmcore.c.

Also, remove the export for sev_active() since it's only used in files that
won't be built as modules.
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lianbo Jiang <lijiang@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-6-bauerman@linux.ibm.com
parent 284e21fa
...@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, ...@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
{ {
return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true); return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
} }
/*
 * x86 override of the weak generic elfcorehdr_read(): reads the ELF core
 * header from the old kernel's memory, telling read_from_oldmem() whether
 * the pages are memory-encrypted (SEV active) so it can copy accordingly.
 */
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	bool encrypted = sev_active();

	return read_from_oldmem(buf, count, ppos, 0, encrypted);
}
...@@ -349,7 +349,6 @@ bool sev_active(void) ...@@ -349,7 +349,6 @@ bool sev_active(void)
{ {
return sme_me_mask && sev_enabled; return sme_me_mask && sev_enabled;
} }
EXPORT_SYMBOL(sev_active);
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev) bool force_dma_unencrypted(struct device *dev)
......
...@@ -104,7 +104,7 @@ static int pfn_is_ram(unsigned long pfn) ...@@ -104,7 +104,7 @@ static int pfn_is_ram(unsigned long pfn)
} }
/* Reads a page from the oldmem device from given offset. */ /* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count, ssize_t read_from_oldmem(char *buf, size_t count,
u64 *ppos, int userbuf, u64 *ppos, int userbuf,
bool encrypted) bool encrypted)
{ {
...@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr) ...@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
*/ */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos) ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{ {
return read_from_oldmem(buf, count, ppos, 0, sev_active()); return read_from_oldmem(buf, count, ppos, 0, false);
} }
/* /*
......
...@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data) ...@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#ifdef CONFIG_PROC_VMCORE
/*
 * Read @count bytes from the old (crashed) kernel's memory at *@ppos,
 * advancing *@ppos.  @userbuf indicates whether @buf is a user-space
 * pointer; @encrypted selects the encrypted-memory copy path (e.g. when
 * SEV is active -- see the x86 elfcorehdr_read() caller).
 */
ssize_t read_from_oldmem(char *buf, size_t count,
u64 *ppos, int userbuf,
bool encrypted);
#else
/* Stub for kernels built without /proc/vmcore support. */
static inline ssize_t read_from_oldmem(char *buf, size_t count,
u64 *ppos, int userbuf,
bool encrypted)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_PROC_VMCORE */
#endif /* LINUX_CRASHDUMP_H */ #endif /* LINUX_CRASHDUMP_H */
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
static inline bool mem_encrypt_active(void) { return false; } static inline bool mem_encrypt_active(void) { return false; }
static inline bool sev_active(void) { return false; }
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment