Commit 7c14e4d6 authored by Linus Torvalds

Merge tag 'hyperv-fixes-signed-20210722' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

 - bug fix from Haiyang for vmbus CPU assignment

 - revert of a bogus patch that went into 5.14-rc1

* tag 'hyperv-fixes-signed-20210722' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  Revert "x86/hyperv: fix logical processor creation"
  Drivers: hv: vmbus: Fix duplicate CPU assignments within a device
parents 4784dc99 f5a11c69
@@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
 	for_each_present_cpu(i) {
 		if (i == 0)
 			continue;
-		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
 		BUG_ON(ret);
 	}
 
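The revert restores cpu_physical_id(i) as the last argument: on x86, cpu_physical_id() returns the CPU's APIC ID, which need not equal the kernel's logical CPU number i, and the revert indicates the APIC ID is what this interface wants. A minimal standalone sketch of why the two namespaces must not be mixed up (the mapping below is invented for illustration and is not part of the patch):

#include <stdio.h>

/* Hypothetical topology: logical CPU i has APIC ID 2 * i, as can happen
 * on real machines, e.g. when SMT is off but APIC IDs are spaced for it. */
static unsigned int example_apic_id(unsigned int cpu)
{
        return 2 * cpu;
}

int main(void)
{
        /* Passing the loop index where an APIC ID is expected would
         * address the wrong processor on any such topology. */
        for (unsigned int cpu = 1; cpu < 4; cpu++)
                printf("logical CPU %u -> APIC ID %u\n",
                       cpu, example_apic_id(cpu));
        return 0;
}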
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		if (guid_equal(&channel->offermsg.offer.if_type,
+			       &newchannel->offermsg.offer.if_type) &&
+		    guid_equal(&channel->offermsg.offer.if_instance,
+			       &newchannel->offermsg.offer.if_instance)) {
+			fnew = false;
+			newchannel->primary_channel = channel;
+			break;
+		}
+	}
+
 	init_vp_index(newchannel);
 
 	/* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	atomic_dec(&vmbus_connection.offer_in_progress);
 
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-		if (guid_equal(&channel->offermsg.offer.if_type,
-			       &newchannel->offermsg.offer.if_type) &&
-		    guid_equal(&channel->offermsg.offer.if_instance,
-			       &newchannel->offermsg.offer.if_instance)) {
-			fnew = false;
-			break;
-		}
-	}
-
 	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		/*
 		 * Process the sub-channel.
 		 */
-		newchannel->primary_channel = channel;
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
 	}
 
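For context on "Fix duplicate CPU assignments within a device": the GUID scan and the primary_channel assignment move up, before init_vp_index(), because the CPU-selection code below now needs to know which device a channel belongs to while its CPU is being chosen. The underlying symptom is that plain round-robin assignment can pile every channel of one device onto the same CPU when two devices' offers interleave. A toy reproduction of that pattern (CPU and device counts invented for illustration, not taken from the patch):

#include <stdio.h>

int main(void)
{
        /* Two devices, two channels each, offered in interleaved order. */
        const char *offers[] = { "devA.ch0", "devB.ch0", "devA.ch1", "devB.ch1" };
        unsigned int next_cpu = 0, ncpu = 2;

        for (int i = 0; i < 4; i++, next_cpu++)
                printf("%s -> CPU%u\n", offers[i], next_cpu % ncpu);
        /* Prints devA.ch0 -> CPU0, devB.ch0 -> CPU1,
         *        devA.ch1 -> CPU0, devB.ch1 -> CPU1:
         * both of devA's channels land on CPU0. The new
         * hv_cpuself_used() check below is what breaks this pattern. */
        return 0;
}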
@@ -683,6 +683,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	queue_work(wq, &newchannel->add_channel_work);
 }
 
+/*
+ * Check if CPUs used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+	struct vmbus_channel *primary = chn->primary_channel;
+	struct vmbus_channel *sc;
+
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+	if (!primary)
+		return false;
+
+	if (primary->target_cpu == cpu)
+		return true;
+
+	list_for_each_entry(sc, &primary->sc_list, sc_list)
+		if (sc != chn && sc->target_cpu == cpu)
+			return true;
+
+	return false;
+}
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
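hv_cpuself_used() answers one narrow question, under the channel mutex: is the candidate CPU already the target of this channel's primary, or of any sibling sub-channel? This is also why vmbus_process_offer() above must link newchannel->primary_channel before calling init_vp_index(). A standalone model of the check (types and names invented; the real code walks struct vmbus_channel lists rather than an array):

#include <stdbool.h>
#include <stdio.h>

struct chan {
        unsigned int target_cpu;
        struct chan *primary;   /* NULL for a primary channel */
};

static bool cpu_used_by_device(unsigned int cpu, const struct chan *chn,
                               const struct chan **all, int n)
{
        const struct chan *primary = chn->primary;

        if (!primary)           /* a primary has nothing to collide with yet */
                return false;
        if (primary->target_cpu == cpu)
                return true;
        for (int i = 0; i < n; i++)     /* scan sibling sub-channels */
                if (all[i] != chn && all[i]->primary == primary &&
                    all[i]->target_cpu == cpu)
                        return true;
        return false;
}

int main(void)
{
        struct chan primary = { .target_cpu = 2, .primary = NULL };
        struct chan sc1 = { .target_cpu = 5, .primary = &primary };
        struct chan sc2 = { .target_cpu = 0, .primary = &primary };
        const struct chan *all[] = { &primary, &sc1, &sc2 };

        /* CPU2 is taken by the primary, CPU5 by sc1, CPU3 is free. */
        printf("%d %d %d\n",
               cpu_used_by_device(2, &sc2, all, 3),     /* 1 */
               cpu_used_by_device(5, &sc2, all, 3),     /* 1 */
               cpu_used_by_device(3, &sc2, all, 3));    /* 0 */
        return 0;
}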
@@ -702,6 +726,7 @@ static int next_numa_node_id;
 static void init_vp_index(struct vmbus_channel *channel)
 {
 	bool perf_chn = hv_is_perf_channel(channel);
+	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
 	struct cpumask *alloced_mask;
 	u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
 		return;
 	}
 
-	while (true) {
-		numa_node = next_numa_node_id++;
-		if (numa_node == nr_node_ids) {
-			next_numa_node_id = 0;
-			continue;
+	for (i = 1; i <= ncpu + 1; i++) {
+		while (true) {
+			numa_node = next_numa_node_id++;
+			if (numa_node == nr_node_ids) {
+				next_numa_node_id = 0;
+				continue;
+			}
+			if (cpumask_empty(cpumask_of_node(numa_node)))
+				continue;
+			break;
+		}
+		alloced_mask = &hv_context.hv_numa_map[numa_node];
+
+		if (cpumask_weight(alloced_mask) ==
+		    cpumask_weight(cpumask_of_node(numa_node))) {
+			/*
+			 * We have cycled through all the CPUs in the node;
+			 * reset the alloced map.
+			 */
+			cpumask_clear(alloced_mask);
 		}
-		if (cpumask_empty(cpumask_of_node(numa_node)))
-			continue;
-		break;
-	}
-	alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_weight(alloced_mask) ==
-	    cpumask_weight(cpumask_of_node(numa_node))) {
-		/*
-		 * We have cycled through all the CPUs in the node;
-		 * reset the alloced map.
-		 */
-		cpumask_clear(alloced_mask);
-	}
+		cpumask_xor(available_mask, alloced_mask,
+			    cpumask_of_node(numa_node));
 
-	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+		target_cpu = cpumask_first(available_mask);
+		cpumask_set_cpu(target_cpu, alloced_mask);
 
-	target_cpu = cpumask_first(available_mask);
-	cpumask_set_cpu(target_cpu, alloced_mask);
+		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
+			break;
+	}
 
 	channel->target_cpu = target_cpu;
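The shape of the new selection loop: compute the usual NUMA-aware round-robin candidate, but retry when hv_cpuself_used() reports a collision. The i > ncpu test guarantees termination once every online CPU has been tried, and the sub_channel_index >= ncpu test skips the retries entirely when a device already has more channels than CPUs, since collisions are then unavoidable. A condensed, hypothetical model of that control flow (choose_target_cpu and the modulo step are invented stand-ins, not the kernel code):

#include <stdbool.h>

static unsigned int choose_target_cpu(unsigned int ncpu, unsigned int *next,
                                      bool (*used)(unsigned int cpu))
{
        unsigned int target = 0;

        for (unsigned int i = 1; i <= ncpu + 1; i++) {
                target = (*next)++ % ncpu;      /* round-robin candidate  */
                if (i > ncpu || !used(target))  /* free CPU found, or     */
                        break;                  /* retries are exhausted  */
        }
        return target;
}

On the last admissible iteration (i == ncpu + 1) the candidate is accepted unconditionally, mirroring the kernel loop: a shared CPU is better than no assignment at all.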