Commit 0f5d752b authored by Linus Torvalds

Merge tag 's390-5.18-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Heiko Carstens:

 - Disable -Warray-bounds warning for gcc12, since the only known way to
   work around false positive warnings on lowcore accesses would result
   in worse code on fast paths (see the sketch after this list).

 - Avoid lockdep_assert_held() warning in kvm vm memop code.

 - Reduce overhead within gmap_rmap code to get rid of long latencies
   when e.g. shutting down 2nd level guests.
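
   For context, a minimal sketch of the kind of lowcore access that trips
   GCC 12's -Warray-bounds. The names below are simplified stand-ins, not
   the kernel's actual definitions (the real code uses struct lowcore and
   the S390_lowcore macro): the s390 lowcore sits at absolute address 0,
   so the fixed-address dereference looks like an out-of-bounds access to
   the compiler even though it is intentional.

/* Illustrative only: simplified stand-in for the s390 lowcore layout. */
struct lowcore_sketch {
	unsigned long restart_stack;
	/* ... many more architecture-defined fields ... */
};

/*
 * The lowcore lives at absolute address 0 and is accessed through a
 * constant pointer; GCC 12's -Warray-bounds flags such dereferences of
 * a fixed (here: zero) address as out of bounds.
 */
#define LOWCORE_SKETCH ((struct lowcore_sketch *)0UL)

static inline void set_restart_stack_sketch(unsigned long addr)
{
	LOWCORE_SKETCH->restart_stack = addr;	/* gcc 12 warns here */
}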

* tag 's390-5.18-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  KVM: s390: vsie/gmap: reduce gmap_rmap overhead
  KVM: s390: Fix lockdep issue in vm memop
  s390: disable -Warray-bounds
parents 905a6537 a06afe83
@@ -30,6 +30,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -ge, 1200, y), y)
+ifeq ($(call cc-ifversion, -lt, 1300, y), y)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
+endif
+endif
+endif
+
 UTS_MACHINE := s390x
 STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
 CHECKFLAGS += -D__s390__ -D__s390x__
......
@@ -2384,7 +2384,16 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 		return -EINVAL;
 	if (mop->size > MEM_OP_MAX_SIZE)
 		return -E2BIG;
-	if (kvm_s390_pv_is_protected(kvm))
+	/*
+	 * This is technically a heuristic only, if the kvm->lock is not
+	 * taken, it is not guaranteed that the vm is/remains non-protected.
+	 * This is ok from a kernel perspective, wrongdoing is detected
+	 * on the access, -EFAULT is returned and the vm may crash the
+	 * next time it accesses the memory in question.
+	 * There is no sane usecase to do switching and a memop on two
+	 * different CPUs at the same time.
+	 */
+	if (kvm_s390_pv_get_handle(kvm))
 		return -EINVAL;
 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
 		if (access_key_invalid(mop->key))
......
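
Why switching the check avoids the lockdep warning, as a hedged sketch
(simplified, not the kernel's exact definitions): kvm_s390_pv_is_protected()
asserts that kvm->lock is held before reading the protected-VM handle, while
kvm_s390_pv_get_handle() is a plain read with no locking requirement, which
is sufficient here given the heuristic reasoning in the comment above.

/* Simplified stand-ins for the two helpers involved in the memop fix;
 * the structure and names below are illustrative only. */
struct pv_state_sketch {
	unsigned long handle;		/* non-zero while the VM is protected */
};

static inline unsigned long pv_get_handle_sketch(struct pv_state_sketch *pv)
{
	return pv->handle;		/* lockless read: what the fix now uses */
}

static inline int pv_is_protected_sketch(struct pv_state_sketch *pv)
{
	/* The real helper starts with lockdep_assert_held(&kvm->lock),
	 * which is what fired in the unlocked vm memop path. */
	return pv_get_handle_sketch(pv) != 0;
}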
@@ -1183,6 +1183,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 				    struct gmap_rmap *rmap)
 {
+	struct gmap_rmap *temp;
 	void __rcu **slot;
 
 	BUG_ON(!gmap_is_shadow(sg));
@@ -1190,6 +1191,12 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 	if (slot) {
 		rmap->next = radix_tree_deref_slot_protected(slot,
 							     &sg->guest_table_lock);
+		for (temp = rmap->next; temp; temp = temp->next) {
+			if (temp->raddr == rmap->raddr) {
+				kfree(rmap);
+				return;
+			}
+		}
 		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
 	} else {
 		rmap->next = NULL;
......
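
The gmap change above follows a dedup-on-insert pattern: before prepending a
new rmap entry to the per-vmaddr chain, it walks the chain and drops the new
entry if one with the same raddr is already present, so repeated shadowing of
the same address no longer grows the chain, and the later teardown walk, without
bound. A minimal standalone sketch of the same pattern, using hypothetical
types unrelated to the kernel's gmap code:

#include <stdlib.h>

/* Hypothetical node type; "key" plays the role of raddr in the gmap code. */
struct rmap_sketch {
	struct rmap_sketch *next;
	unsigned long key;
};

/* Prepend "new" to *head unless an entry with the same key already exists,
 * in which case "new" is freed and the chain is left untouched. */
static void insert_unique_sketch(struct rmap_sketch **head,
				 struct rmap_sketch *new)
{
	struct rmap_sketch *temp;

	for (temp = *head; temp; temp = temp->next) {
		if (temp->key == new->key) {
			free(new);	/* duplicate: drop it, keep the chain */
			return;
		}
	}
	new->next = *head;
	*head = new;
}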