Commit 9fe6a8c5 authored by Juergen Gross

x86/xen: remove deprecated xen_nopvspin boot parameter

The xen_nopvspin boot parameter has been deprecated since 2019; nopvspin
can be used instead.

Remove the xen_nopvspin boot parameter and replace the remaining uses of
the xen_pvspin variable with nopvspin.

This requires moving the nopvspin variable out of the .initdata
section, as it needs to be accessed for CPU hotplug, too.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Message-ID: <20240710110139.22300-1-jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent bcea31e2
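Why the .initdata move matters, as a minimal sketch (not part of the patch; bodies simplified, kernel context assumed): anything tagged __initdata is freed by free_initmem() once boot completes, while xen_init_lock_cpu() runs again whenever a CPU is onlined, so the flag it tests has to live in ordinary data.

	#include <linux/types.h>
	#include <linux/init.h>

	/*
	 * old: bool nopvspin __initdata; -- freed after boot, so a CPU
	 * brought online later would test freed memory
	 */
	bool nopvspin;	/* new: plain .data, valid for the kernel's whole lifetime */

	/* simplified consumer: invoked from the CPU-hotplug path, not only at boot */
	void xen_init_lock_cpu(int cpu)
	{
		if (nopvspin)	/* safe: nopvspin survives free_initmem() */
			return;
		/* ... per-CPU lock kicker IRQ setup elided ... */
	}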
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -7439,11 +7439,6 @@
 			access functions when running as Xen PV guest. The
 			default value is controlled by CONFIG_XEN_PV_MSR_SAFE.
 
-	xen_nopvspin	[X86,XEN,EARLY]
-			Disables the qspinlock slowpath using Xen PV optimizations.
-			This parameter is obsoleted by "nopvspin" parameter, which
-			has equivalent effect for XEN platform.
-
 	xen_nopv	[X86]
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.
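With the dedicated parameter gone, the same behaviour is requested through the generic switch; e.g. a Xen guest's kernel command line (illustrative, root device hypothetical):

	vmlinuz root=/dev/xvda1 nopvspin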
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -18,7 +18,6 @@
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
-static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
 {
@@ -68,7 +67,7 @@ void xen_init_lock_cpu(int cpu)
 	int irq;
 	char *name;
 
-	if (!xen_pvspin)
+	if (nopvspin)
 		return;
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
@@ -95,7 +94,7 @@ void xen_uninit_lock_cpu(int cpu)
 {
 	int irq;
 
-	if (!xen_pvspin)
+	if (nopvspin)
 		return;
 
 	kfree(per_cpu(irq_name, cpu));
@@ -125,10 +124,10 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
 void __init xen_init_spinlocks(void)
 {
 	/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
-	if (num_possible_cpus() == 1 || nopvspin)
-		xen_pvspin = false;
+	if (num_possible_cpus() == 1)
+		nopvspin = true;
 
-	if (!xen_pvspin) {
+	if (nopvspin) {
 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
 		static_branch_disable(&virt_spin_lock_key);
 		return;
@@ -143,12 +142,3 @@ void __init xen_init_spinlocks(void)
 	pv_ops.lock.kick = xen_qlock_kick;
 	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
-
-static __init int xen_parse_nopvspin(char *arg)
-{
-	pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
-	xen_pvspin = false;
-	return 0;
-}
-early_param("xen_nopvspin", xen_parse_nopvspin);
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -583,7 +583,7 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
 #include "qspinlock_paravirt.h"
 #include "qspinlock.c"
 
-bool nopvspin __initdata;
+bool nopvspin;
 static __init int parse_nopvspin(char *arg)
 {
 	nopvspin = true;
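After the patch, kernel/locking/qspinlock.c carries the only parser left. A sketch of the resulting code, reconstructed from the hunk above plus the lines the view truncates (the early_param() registration is assumed unchanged by this patch):

	bool nopvspin;

	/* "nopvspin" on the kernel command line disables PV spinlocks everywhere */
	static __init int parse_nopvspin(char *arg)
	{
		nopvspin = true;
		return 0;
	}
	early_param("nopvspin", parse_nopvspin);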