Commit b29ef354 authored by K. Y. Srinivasan, committed by Greg Kroah-Hartman

Drivers: hv: vmbus: Cleanup hv_post_message()

Minimize failures in this function by pre-allocating the buffer
for posting messages. The hypercall for posting the message can fail
for a number of reasons:

        1. Transient resource related issues
        2. Buffer alignment
        3. Buffer cannot span a page boundary

We address issues 2 and 3 by preallocating a per-cpu page for the buffer.
Transient resource-related failures (issue 1) are handled by the callers
of this function, which retry the operation.
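
[Editorial note: to make the alignment and page-boundary argument concrete,
here is a minimal user-space C sketch of the invariant that a page-backed
buffer provides. PAGE_SIZE, HYPERCALL_PARAM_ALIGN, and the assertions are
illustrative assumptions, not the driver's code.]

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096              /* illustrative: x86 default page size */
#define HYPERCALL_PARAM_ALIGN 8     /* illustrative alignment requirement  */

int main(void)
{
	/*
	 * A page-sized, page-aligned allocation, standing in for the
	 * per-cpu post_msg_page that the patch obtains with
	 * get_zeroed_page() in the kernel.
	 */
	unsigned char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	uintptr_t addr = (uintptr_t)page;

	assert(page != NULL);

	/*
	 * Issue 2: page alignment implies every smaller power-of-two
	 * alignment, so the hypercall parameter alignment always holds.
	 */
	assert(addr % HYPERCALL_PARAM_ALIGN == 0);

	/*
	 * Issue 3: a message that fits within the page starts at offset
	 * 0, so its first and last bytes fall in the same page and it
	 * can never straddle a page boundary.
	 */
	assert(addr / PAGE_SIZE == (addr + PAGE_SIZE - 1) / PAGE_SIZE);

	free(page);
	return 0;
}

Because the buffer starts at a page boundary and a message never exceeds
one page, both hypercall constraints hold on every call, with no per-call
allocation that could fail.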

This patch is based on the investigation
done by Dexuan Cui <decui@microsoft.com>.

I would like to thank Sitsofe Wheeler <sitsofe@yahoo.com>
for reporting the issue and helping in debugging.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reported-by: Sitsofe Wheeler <sitsofe@yahoo.com>
Cc: <stable@vger.kernel.org>
Tested-by: Sitsofe Wheeler <sitsofe@yahoo.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 98d731bb
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -138,6 +138,8 @@ int hv_init(void)
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
 	       sizeof(void *) * NR_CPUS);
+	memset(hv_context.post_msg_page, 0,
+	       sizeof(void *) * NR_CPUS);
 	memset(hv_context.vp_index, 0,
 	       sizeof(int) * NR_CPUS);
 	memset(hv_context.event_dpc, 0,
@@ -217,26 +219,18 @@ int hv_post_message(union hv_connection_id connection_id,
 		  enum hv_message_type message_type,
 		  void *payload, size_t payload_size)
 {
-	struct aligned_input {
-		u64 alignment8;
-		struct hv_input_post_message msg;
-	};
 
 	struct hv_input_post_message *aligned_msg;
 	u16 status;
-	unsigned long addr;
 
 	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
 		return -EMSGSIZE;
 
-	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
-	if (!addr)
-		return -ENOMEM;
-
 	aligned_msg = (struct hv_input_post_message *)
-			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));
+			hv_context.post_msg_page[get_cpu()];
 
 	aligned_msg->connectionid = connection_id;
+	aligned_msg->reserved = 0;
 	aligned_msg->message_type = message_type;
 	aligned_msg->payload_size = payload_size;
 	memcpy((void *)aligned_msg->payload, payload, payload_size);
@@ -244,8 +238,7 @@ int hv_post_message(union hv_connection_id connection_id,
 	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
 		& 0xFFFF;
 
-	kfree((void *)addr);
-
+	put_cpu();
 	return status;
 }
@@ -294,6 +287,14 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate SYNIC event page\n");
 			goto err;
 		}
+
+		hv_context.post_msg_page[cpu] =
+			(void *)get_zeroed_page(GFP_ATOMIC);
+
+		if (hv_context.post_msg_page[cpu] == NULL) {
+			pr_err("Unable to allocate post msg page\n");
+			goto err;
+		}
 	}
 
 	return 0;
@@ -308,6 +309,8 @@ static void hv_synic_free_cpu(int cpu)
 		free_page((unsigned long)hv_context.synic_event_page[cpu]);
 	if (hv_context.synic_message_page[cpu])
 		free_page((unsigned long)hv_context.synic_message_page[cpu]);
+	if (hv_context.post_msg_page[cpu])
+		free_page((unsigned long)hv_context.post_msg_page[cpu]);
 }
 
 void hv_synic_free(void)
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -515,6 +515,10 @@ struct hv_context {
 	 * per-cpu list of the channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list[NR_CPUS];
+	/*
+	 * buffer to post messages to the host.
+	 */
+	void *post_msg_page[NR_CPUS];
 };
 
 extern struct hv_context hv_context;
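
[Editorial note: for issue 1, a user-space sketch of the caller-side retry
pattern follows. The status value, the retry bound, and the stubbed
hv_post_message() are assumptions for illustration; the in-tree callers,
e.g. vmbus_post_msg() in drivers/hv/connection.c, implement their own
variants of this loop.]

#include <stdio.h>

#define HV_STATUS_SUCCESS               0
#define HV_STATUS_INSUFFICIENT_BUFFERS 51   /* illustrative value */
#define MAX_RETRIES                    10

/*
 * Stub standing in for the real hv_post_message(): it fails twice with
 * a transient status and then succeeds, so the loop below has
 * something to retry against.
 */
static int hv_post_message_stub(void)
{
	static int calls;

	return (++calls < 3) ? HV_STATUS_INSUFFICIENT_BUFFERS
			     : HV_STATUS_SUCCESS;
}

int main(void)
{
	int ret = HV_STATUS_INSUFFICIENT_BUFFERS;
	int attempts = 0;

	while (attempts < MAX_RETRIES) {
		attempts++;
		ret = hv_post_message_stub();

		/*
		 * Only transient, resource-related failures are worth
		 * retrying; any other status is treated as final.
		 */
		if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
			break;

		/* A real caller would sleep briefly before retrying. */
	}

	printf("status %d after %d attempt(s)\n", ret, attempts);
	return ret == HV_STATUS_SUCCESS ? 0 : 1;
}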