Commit 6640b5df authored by Saurabh Sengar, committed by Wei Liu

Drivers: hv: vmbus: Don't assign VMbus channel interrupts to isolated CPUs

When initially assigning a VMbus channel interrupt to a CPU, don't choose
a managed IRQ isolated CPU (as specified on the kernel boot line with
parameter 'isolcpus=managed_irq,<#cpu>'). Also, when using sysfs to change
the CPU that a VMbus channel will interrupt, don't allow changing to a
managed IRQ isolated CPU.
Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/1653637439-23060-1-git-send-email-ssengar@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
parent f2906aa8
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/hyperv.h> #include <linux/hyperv.h>
#include <asm/mshyperv.h> #include <asm/mshyperv.h>
#include <linux/sched/isolation.h>
#include "hyperv_vmbus.h" #include "hyperv_vmbus.h"
...@@ -728,16 +729,20 @@ static void init_vp_index(struct vmbus_channel *channel) ...@@ -728,16 +729,20 @@ static void init_vp_index(struct vmbus_channel *channel)
u32 i, ncpu = num_online_cpus(); u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask; cpumask_var_t available_mask;
struct cpumask *allocated_mask; struct cpumask *allocated_mask;
const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
u32 target_cpu; u32 target_cpu;
int numa_node; int numa_node;
if (!perf_chn || if (!perf_chn ||
!alloc_cpumask_var(&available_mask, GFP_KERNEL)) { !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
cpumask_empty(hk_mask)) {
/* /*
* If the channel is not a performance critical * If the channel is not a performance critical
* channel, bind it to VMBUS_CONNECT_CPU. * channel, bind it to VMBUS_CONNECT_CPU.
* In case alloc_cpumask_var() fails, bind it to * In case alloc_cpumask_var() fails, bind it to
* VMBUS_CONNECT_CPU. * VMBUS_CONNECT_CPU.
* If all the cpus are isolated, bind it to
* VMBUS_CONNECT_CPU.
*/ */
channel->target_cpu = VMBUS_CONNECT_CPU; channel->target_cpu = VMBUS_CONNECT_CPU;
if (perf_chn) if (perf_chn)
...@@ -758,17 +763,19 @@ static void init_vp_index(struct vmbus_channel *channel) ...@@ -758,17 +763,19 @@ static void init_vp_index(struct vmbus_channel *channel)
} }
allocated_mask = &hv_context.hv_numa_map[numa_node]; allocated_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) { retry:
cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
cpumask_and(available_mask, available_mask, hk_mask);
if (cpumask_empty(available_mask)) {
/* /*
* We have cycled through all the CPUs in the node; * We have cycled through all the CPUs in the node;
* reset the allocated map. * reset the allocated map.
*/ */
cpumask_clear(allocated_mask); cpumask_clear(allocated_mask);
goto retry;
} }
cpumask_xor(available_mask, allocated_mask,
cpumask_of_node(numa_node));
target_cpu = cpumask_first(available_mask); target_cpu = cpumask_first(available_mask);
cpumask_set_cpu(target_cpu, allocated_mask); cpumask_set_cpu(target_cpu, allocated_mask);
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/clockchips.h> #include <linux/clockchips.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h> #include <linux/sched/task_stack.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, ...@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (target_cpu >= nr_cpumask_bits) if (target_cpu >= nr_cpumask_bits)
return -EINVAL; return -EINVAL;
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
return -EINVAL;
/* No CPUs should come up or down during this. */ /* No CPUs should come up or down during this. */
cpus_read_lock(); cpus_read_lock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment