Commit f4debb40 authored by David Hildenbrand, committed by Christian Borntraeger

s390/mm: take ipte_lock during shadow faults

Let's take the ipte_lock while working on guest 2 provided page table, just
like the other gaccess functions.
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 7a674157
...@@ -1073,6 +1073,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, ...@@ -1073,6 +1073,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
/** /**
* kvm_s390_shadow_fault - handle fault on a shadow page table * kvm_s390_shadow_fault - handle fault on a shadow page table
* @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure * @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap * @saddr: faulting address in the shadow gmap
* *
...@@ -1082,7 +1083,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, ...@@ -1082,7 +1083,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
* - -EFAULT when accessing invalid guest addresses * - -EFAULT when accessing invalid guest addresses
* - -ENOMEM if out of memory * - -ENOMEM if out of memory
*/ */
int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr) int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
unsigned long saddr)
{ {
union vaddress vaddr; union vaddress vaddr;
union page_table_entry pte; union page_table_entry pte;
...@@ -1091,6 +1093,12 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr) ...@@ -1091,6 +1093,12 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
int rc; int rc;
down_read(&sg->mm->mmap_sem); down_read(&sg->mm->mmap_sem);
/*
* We don't want any guest-2 tables to change - so the parent
* tables/pointers we read stay valid - unshadowing is however
* always possible - only guest_table_lock protects us.
*/
ipte_lock(vcpu);
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection); rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection);
if (rc) if (rc)
...@@ -1105,6 +1113,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr) ...@@ -1105,6 +1113,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
rc = PGM_TRANSLATION_SPEC; rc = PGM_TRANSLATION_SPEC;
if (!rc) if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val)); rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
ipte_unlock(vcpu);
up_read(&sg->mm->mmap_sem); up_read(&sg->mm->mmap_sem);
return rc; return rc;
} }
...@@ -361,6 +361,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu); ...@@ -361,6 +361,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu); int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr); int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
unsigned long saddr);
#endif /* __KVM_S390_GACCESS_H */ #endif /* __KVM_S390_GACCESS_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment