Commit 084ea4d6 authored by Vasily Gorbik, committed by Christian Borntraeger

s390/mm: add (non)secure page access exceptions handlers

Add exception handlers performing transparent transition of non-secure
pages to secure (import) upon guest access, and of secure pages to
non-secure (export) upon hypervisor access.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
[frankja@linux.ibm.com: adding checks for failures]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
[imbrenda@linux.ibm.com: adding a check for gmap fault]
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
[borntraeger@de.ibm.com: patch merging, splitting, fixing]
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 214d9bbc
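
For orientation before the diff: program-interruption codes 0x3d and 0x3e are routed to the two new handlers via the PGM_CHECK table in the second hunk below. The following is a minimal sketch of that routing and of the import/export directions described in the commit message; handle_uv_pgm_check() is a hypothetical helper used only for illustration, while the code values and handler names are taken from the patch itself.

/*
 * Minimal sketch, not part of the commit: how the two new program
 * checks map to handlers. handle_uv_pgm_check() is a hypothetical
 * helper; in the kernel the routing is done by the PGM_CHECK table
 * shown in the diff below.
 */
struct pt_regs;
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);

static void handle_uv_pgm_check(struct pt_regs *regs, unsigned int pgm_code)
{
	switch (pgm_code) {
	case 0x3d:	/* secure storage access: hypervisor touched a secure page */
		do_secure_storage_access(regs);		/* export: make the page accessible */
		break;
	case 0x3e:	/* non-secure storage access: guest touched a non-secure page */
		do_non_secure_storage_access(regs);	/* import: convert the page to secure */
		break;
	}
}
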
@@ -24,6 +24,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
+void do_secure_storage_access(struct pt_regs *regs);
+void do_non_secure_storage_access(struct pt_regs *regs);
 void addressing_exception(struct pt_regs *regs);
 void data_exception(struct pt_regs *regs);
...
@@ -78,8 +78,8 @@ PGM_CHECK(do_dat_exception)		/* 39 */
 PGM_CHECK(do_dat_exception)		/* 3a */
 PGM_CHECK(do_dat_exception)		/* 3b */
 PGM_CHECK_DEFAULT			/* 3c */
-PGM_CHECK_DEFAULT			/* 3d */
-PGM_CHECK_DEFAULT			/* 3e */
+PGM_CHECK(do_secure_storage_access)	/* 3d */
+PGM_CHECK(do_non_secure_storage_access)	/* 3e */
 PGM_CHECK_DEFAULT			/* 3f */
 PGM_CHECK(monitor_event_exception)	/* 40 */
 PGM_CHECK_DEFAULT			/* 41 */
...
@@ -38,6 +38,7 @@
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
+#include <asm/uv.h>
 #include "../kernel/entry.h"
 
 #define __FAIL_ADDR_MASK -4096L
@@ -816,3 +817,80 @@ static int __init pfault_irq_init(void)
 early_initcall(pfault_irq_init);
 #endif /* CONFIG_PFAULT */
+
+#if IS_ENABLED(CONFIG_PGSTE)
+void do_secure_storage_access(struct pt_regs *regs)
+{
+	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+	struct page *page;
+	int rc;
+
+	switch (get_fault_type(regs)) {
+	case USER_FAULT:
+		mm = current->mm;
+		down_read(&mm->mmap_sem);
+		vma = find_vma(mm, addr);
+		if (!vma) {
+			up_read(&mm->mmap_sem);
+			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+			break;
+		}
+		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
+		if (IS_ERR_OR_NULL(page)) {
+			up_read(&mm->mmap_sem);
+			break;
+		}
+		if (arch_make_page_accessible(page))
+			send_sig(SIGSEGV, current, 0);
+		put_page(page);
+		up_read(&mm->mmap_sem);
+		break;
+	case KERNEL_FAULT:
+		page = phys_to_page(addr);
+		if (unlikely(!try_get_page(page)))
+			break;
+		rc = arch_make_page_accessible(page);
+		put_page(page);
+		if (rc)
+			BUG();
+		break;
+	case VDSO_FAULT:
+		/* fallthrough */
+	case GMAP_FAULT:
+		/* fallthrough */
+	default:
+		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+		WARN_ON_ONCE(1);
+	}
+}
+NOKPROBE_SYMBOL(do_secure_storage_access);
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+	if (get_fault_type(regs) != GMAP_FAULT) {
+		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
+		send_sig(SIGSEGV, current, 0);
+}
+NOKPROBE_SYMBOL(do_non_secure_storage_access);
+
+#else
+void do_secure_storage_access(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
+
+void do_non_secure_storage_access(struct pt_regs *regs)
+{
+	default_trap_handler(regs);
+}
+#endif