Commit f9dc72e8 authored by Heiko Carstens, committed by Marcelo Tosatti

s390/kvm,gaccess: shorten copy_to/from_guest code

The code can be significantly shortened. There is no functional change,
except that for large (> PAGE_SIZE) copies the guest translation would
be done more frequently.
However, no current caller copies more than PAGE_SIZE at once. If such a
user gets added later on, this functionality can easily be added again.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 396083a9
...@@ -18,16 +18,19 @@ ...@@ -18,16 +18,19 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "kvm-s390.h" #include "kvm-s390.h"
static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr,
int prefixing)
{ {
unsigned long prefix = vcpu->arch.sie_block->prefix; unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long gaddr = (unsigned long) gptr; unsigned long gaddr = (unsigned long) gptr;
unsigned long uaddr; unsigned long uaddr;
if (prefixing) {
if (gaddr < 2 * PAGE_SIZE) if (gaddr < 2 * PAGE_SIZE)
gaddr += prefix; gaddr += prefix;
else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE)) else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
gaddr -= prefix; gaddr -= prefix;
}
uaddr = gmap_fault(gaddr, vcpu->arch.gmap); uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
if (IS_ERR_VALUE(uaddr)) if (IS_ERR_VALUE(uaddr))
uaddr = -EFAULT; uaddr = -EFAULT;
...@@ -36,7 +39,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) ...@@ -36,7 +39,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
#define get_guest(vcpu, x, gptr) \ #define get_guest(vcpu, x, gptr) \
({ \ ({ \
__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
int __mask = sizeof(__typeof__(*(gptr))) - 1; \ int __mask = sizeof(__typeof__(*(gptr))) - 1; \
int __ret = PTR_RET(__uptr); \ int __ret = PTR_RET(__uptr); \
\ \
...@@ -49,7 +52,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) ...@@ -49,7 +52,7 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
#define put_guest(vcpu, x, gptr) \ #define put_guest(vcpu, x, gptr) \
({ \ ({ \
__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr); \ __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
int __mask = sizeof(__typeof__(*(gptr))) - 1; \ int __mask = sizeof(__typeof__(*(gptr))) - 1; \
int __ret = PTR_RET(__uptr); \ int __ret = PTR_RET(__uptr); \
\ \
...@@ -60,255 +63,40 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr) ...@@ -60,255 +63,40 @@ static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
__ret; \ __ret; \
}) })
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
unsigned long guestdest, unsigned long from, unsigned long len,
void *from, unsigned long n) int to_guest, int prefixing)
{ {
int rc; unsigned long _len, rc;
unsigned long i; void *uptr;
u8 *data = from;
for (i = 0; i < n; i++) {
rc = put_guest(vcpu, *(data++), (u8 *)guestdest++);
if (rc < 0)
return rc;
}
return 0;
}
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu, while (len) {
unsigned long guestdest, uptr = to_guest ? (void *)to : (void *)from;
void *from, unsigned long n) uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
{ if (IS_ERR(uptr))
int r;
void __user *uptr;
unsigned long size;
if (guestdest + n < guestdest)
return -EFAULT; return -EFAULT;
_len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
/* simple case: all within one segment table entry? */ _len = min(_len, len);
if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) { if (to_guest)
uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap); rc = copy_to_user(uptr, (void *)from, _len);
else
if (IS_ERR((void __force *) uptr)) rc = copy_from_user((void *)to, uptr, _len);
return PTR_ERR((void __force *) uptr); if (rc)
r = copy_to_user(uptr, from, n);
if (r)
r = -EFAULT;
goto out;
}
/* copy first segment */
uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
size = PMD_SIZE - (guestdest & ~PMD_MASK);
r = copy_to_user(uptr, from, size);
if (r) {
r = -EFAULT;
goto out;
}
from += size;
n -= size;
guestdest += size;
/* copy full segments */
while (n >= PMD_SIZE) {
uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
r = copy_to_user(uptr, from, PMD_SIZE);
if (r) {
r = -EFAULT;
goto out;
}
from += PMD_SIZE;
n -= PMD_SIZE;
guestdest += PMD_SIZE;
}
/* copy the tail segment */
if (n) {
uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
r = copy_to_user(uptr, from, n);
if (r)
r = -EFAULT;
}
out:
return r;
}
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
void *from, unsigned long n)
{
return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
void *from, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
if ((guestdest < prefix) && (guestdest + n > prefix))
goto slowpath;
if ((guestdest < prefix + 2 * PAGE_SIZE)
&& (guestdest + n > prefix + 2 * PAGE_SIZE))
goto slowpath;
if (guestdest < 2 * PAGE_SIZE)
guestdest += prefix;
else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
guestdest -= prefix;
return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
int rc;
unsigned long i;
u8 *data = to;
for (i = 0; i < n; i++) {
rc = get_guest(vcpu, *(data++), (u8 *)guestsrc++);
if (rc < 0)
return rc;
}
return 0;
}
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
int r;
void __user *uptr;
unsigned long size;
if (guestsrc + n < guestsrc)
return -EFAULT; return -EFAULT;
len -= _len;
/* simple case: all within one segment table entry? */ from += _len;
if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) { to += _len;
uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
r = copy_from_user(to, uptr, n);
if (r)
r = -EFAULT;
goto out;
}
/* copy first segment */
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
size = PMD_SIZE - (guestsrc & ~PMD_MASK);
r = copy_from_user(to, uptr, size);
if (r) {
r = -EFAULT;
goto out;
}
to += size;
n -= size;
guestsrc += size;
/* copy full segments */
while (n >= PMD_SIZE) {
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
r = copy_from_user(to, uptr, PMD_SIZE);
if (r) {
r = -EFAULT;
goto out;
}
to += PMD_SIZE;
n -= PMD_SIZE;
guestsrc += PMD_SIZE;
} }
return 0;
/* copy the tail segment */
if (n) {
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);
r = copy_from_user(to, uptr, n);
if (r)
r = -EFAULT;
}
out:
return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
return __copy_from_guest_fast(vcpu, to, guestsrc, n);
} }
/*
 * Convenience wrappers around __copy_guest():
 *  - copy_to_guest()/copy_from_guest() apply low-core prefixing to the
 *    guest address,
 *  - the *_absolute() variants operate on absolute guest addresses
 *    (no prefixing).
 */
#define copy_to_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
#define copy_from_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
#define copy_to_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
#define copy_from_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
if ((guestsrc < prefix) && (guestsrc + n > prefix))
goto slowpath;
if ((guestsrc < prefix + 2 * PAGE_SIZE)
&& (guestsrc + n > prefix + 2 * PAGE_SIZE))
goto slowpath;
if (guestsrc < 2 * PAGE_SIZE) #endif /* __KVM_S390_GACCESS_H */
guestsrc += prefix;
else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
guestsrc -= prefix;
return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment