Commit 54e19d34 authored by Dexuan Cui's avatar Dexuan Cui Committed by Sasha Levin

hv_utils: Add the support of hibernation

Add util_pre_suspend() and util_pre_resume() for some hv_utils devices
(e.g. kvp/vss/fcopy), because they need special handling before
util_suspend() calls vmbus_close().

For kvp, all the possible pending work items should be cancelled.

For vss and fcopy, some extra clean-up needs to be done, i.e. fake a
THAW message for hv_vss_daemon and fake a CANCEL_FCOPY message for
hv_fcopy_daemon, otherwise when the VM resumes back, the daemons
can end up in an inconsistent state (i.e. the file systems are
frozen but will never be thawed; the file transmitted via fcopy
may not be complete). Note: there is an extra patch for the daemons:
"Tools: hv: Reopen the devices if read() or write() returns errors",
because the hv_utils driver can not guarantee the whole transaction
finishes completely once util_suspend() starts to run (at this time,
all the userspace processes are frozen).

util_probe() disables channel->callback_event to avoid the race with
the channel callback.
Signed-off-by: default avatarDexuan Cui <decui@microsoft.com>
Reviewed-by: default avatarMichael Kelley <mikelley@microsoft.com>
Signed-off-by: default avatarSasha Levin <sashal@kernel.org>
parent ffd1d4a4
...@@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv) ...@@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv)
return 0; return 0;
} }
/*
 * Cancel any pending fcopy work items: the transaction-timeout timer and the
 * work that forwards a host request to the user-space daemon. Callers must
 * first stop new work from being queued — hv_fcopy_pre_suspend() disables the
 * channel callback tasklet, and hv_fcopy_deinit() marks the device as dying.
 */
static void hv_fcopy_cancel_work(void)
{
	cancel_delayed_work_sync(&fcopy_timeout_work);
	cancel_work_sync(&fcopy_send_work);
}
/*
 * Called on hibernation, before util_suspend() closes the VMBus channel.
 *
 * Quiesces the fcopy state machine and tells the user-space daemon to abort
 * any in-flight file copy.
 *
 * Return: 0 on success, -ENOMEM if the fake message cannot be allocated.
 */
int hv_fcopy_pre_suspend(void)
{
	struct vmbus_channel *channel = fcopy_transaction.recv_channel;
	struct hv_fcopy_hdr *fcopy_msg;

	/*
	 * Fake a CANCEL_FCOPY message for the user space daemon in case the
	 * daemon is in the middle of copying some file. It doesn't matter if
	 * there is already a message pending to be delivered to the user
	 * space since we force fcopy_transaction.state to be HVUTIL_READY, so
	 * the user space daemon's write() will fail with EINVAL (see
	 * fcopy_on_msg()), and the daemon will reset the device by closing
	 * and re-opening it.
	 */
	fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
	if (!fcopy_msg)
		return -ENOMEM;

	/* Stop the channel callback from racing with the cleanup below. */
	tasklet_disable(&channel->callback_event);

	fcopy_msg->operation = CANCEL_FCOPY;

	/* Cancel any possible pending work before faking the message. */
	hv_fcopy_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);

	kfree(fcopy_msg);

	fcopy_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
	return 0;
}
/*
 * Called on resume, after util_resume() has re-opened the VMBus channel:
 * re-enable the channel callback tasklet that hv_fcopy_pre_suspend() disabled.
 *
 * Return: always 0.
 */
int hv_fcopy_pre_resume(void)
{
	tasklet_enable(&fcopy_transaction.recv_channel->callback_event);

	return 0;
}
void hv_fcopy_deinit(void) void hv_fcopy_deinit(void)
{ {
fcopy_transaction.state = HVUTIL_DEVICE_DYING; fcopy_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&fcopy_timeout_work);
hv_fcopy_cancel_work();
hvutil_transport_destroy(hvt); hvutil_transport_destroy(hvt);
} }
...@@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv) ...@@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv)
return 0; return 0;
} }
void hv_kvp_deinit(void) static void hv_kvp_cancel_work(void)
{ {
kvp_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&kvp_host_handshake_work); cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work); cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work); cancel_work_sync(&kvp_sendkey_work);
}
/*
 * Called on hibernation, before util_suspend() closes the VMBus channel.
 * Quiesces the KVP state machine so the channel can be closed safely.
 *
 * Return: always 0.
 */
int hv_kvp_pre_suspend(void)
{
	struct vmbus_channel *channel = kvp_transaction.recv_channel;

	/* Stop the channel callback from racing with the cleanup below. */
	tasklet_disable(&channel->callback_event);

	/*
	 * If there is a pending transaction, it's unnecessary to tell the host
	 * that the transaction will fail, because that is implied when
	 * util_suspend() calls vmbus_close() later.
	 */
	hv_kvp_cancel_work();

	/*
	 * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
	 * later. The user space daemon may go out of order and its write()
	 * may fail with EINVAL: this doesn't matter since the daemon will
	 * reset the device by closing and re-opening it.
	 */
	kvp_transaction.state = HVUTIL_READY;
	return 0;
}
/*
 * Called on resume, after util_resume() has re-opened the VMBus channel:
 * re-enable the channel callback tasklet that hv_kvp_pre_suspend() disabled.
 *
 * Return: always 0.
 */
int hv_kvp_pre_resume(void)
{
	tasklet_enable(&kvp_transaction.recv_channel->callback_event);

	return 0;
}
void hv_kvp_deinit(void)
{
kvp_transaction.state = HVUTIL_DEVICE_DYING;
hv_kvp_cancel_work();
hvutil_transport_destroy(hvt); hvutil_transport_destroy(hvt);
} }
...@@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv) ...@@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv)
return 0; return 0;
} }
void hv_vss_deinit(void) static void hv_vss_cancel_work(void)
{ {
vss_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&vss_timeout_work); cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_handle_request_work); cancel_work_sync(&vss_handle_request_work);
}
/*
 * Called on hibernation, before util_suspend() closes the VMBus channel.
 *
 * Quiesces the VSS state machine and fakes a THAW message so the user-space
 * daemon does not leave the file systems frozen across hibernation.
 *
 * Return: 0 on success, -ENOMEM if the fake message cannot be allocated.
 */
int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to the user space since we force
	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
	 * will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	/* Stop the channel callback from racing with the cleanup below. */
	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}
/*
 * Called on resume, after util_resume() has re-opened the VMBus channel:
 * re-enable the channel callback tasklet that hv_vss_pre_suspend() disabled.
 *
 * Return: always 0.
 */
int hv_vss_pre_resume(void)
{
	tasklet_enable(&vss_transaction.recv_channel->callback_event);

	return 0;
}
void hv_vss_deinit(void)
{
vss_transaction.state = HVUTIL_DEVICE_DYING;
hv_vss_cancel_work();
hvutil_transport_destroy(hvt); hvutil_transport_destroy(hvt);
} }
...@@ -123,12 +123,14 @@ static struct hv_util_service util_shutdown = { ...@@ -123,12 +123,14 @@ static struct hv_util_service util_shutdown = {
}; };
static int hv_timesync_init(struct hv_util_service *srv); static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void); static void hv_timesync_deinit(void);
static void timesync_onchannelcallback(void *context); static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = { static struct hv_util_service util_timesynch = {
.util_cb = timesync_onchannelcallback, .util_cb = timesync_onchannelcallback,
.util_init = hv_timesync_init, .util_init = hv_timesync_init,
.util_pre_suspend = hv_timesync_pre_suspend,
.util_deinit = hv_timesync_deinit, .util_deinit = hv_timesync_deinit,
}; };
...@@ -140,18 +142,24 @@ static struct hv_util_service util_heartbeat = { ...@@ -140,18 +142,24 @@ static struct hv_util_service util_heartbeat = {
static struct hv_util_service util_kvp = { static struct hv_util_service util_kvp = {
.util_cb = hv_kvp_onchannelcallback, .util_cb = hv_kvp_onchannelcallback,
.util_init = hv_kvp_init, .util_init = hv_kvp_init,
.util_pre_suspend = hv_kvp_pre_suspend,
.util_pre_resume = hv_kvp_pre_resume,
.util_deinit = hv_kvp_deinit, .util_deinit = hv_kvp_deinit,
}; };
static struct hv_util_service util_vss = { static struct hv_util_service util_vss = {
.util_cb = hv_vss_onchannelcallback, .util_cb = hv_vss_onchannelcallback,
.util_init = hv_vss_init, .util_init = hv_vss_init,
.util_pre_suspend = hv_vss_pre_suspend,
.util_pre_resume = hv_vss_pre_resume,
.util_deinit = hv_vss_deinit, .util_deinit = hv_vss_deinit,
}; };
static struct hv_util_service util_fcopy = { static struct hv_util_service util_fcopy = {
.util_cb = hv_fcopy_onchannelcallback, .util_cb = hv_fcopy_onchannelcallback,
.util_init = hv_fcopy_init, .util_init = hv_fcopy_init,
.util_pre_suspend = hv_fcopy_pre_suspend,
.util_pre_resume = hv_fcopy_pre_resume,
.util_deinit = hv_fcopy_deinit, .util_deinit = hv_fcopy_deinit,
}; };
...@@ -511,6 +519,44 @@ static int util_remove(struct hv_device *dev) ...@@ -511,6 +519,44 @@ static int util_remove(struct hv_device *dev)
return 0; return 0;
} }
/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 *
 * Runs the service's optional pre-suspend hook, then closes the channel.
 * Return: 0 on success, or the pre-suspend hook's error code.
 */
static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_pre_suspend) {
		int ret = srv->util_pre_suspend();

		if (ret != 0)
			return ret;
	}

	vmbus_close(dev->channel);

	return 0;
}
/*
 * Resume counterpart of util_suspend(): runs the service's optional
 * pre-resume hook, then re-opens the VMBus channel that was closed on
 * suspend.
 *
 * Return: 0 on success, or an error from the hook or vmbus_open().
 */
static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_pre_resume) {
		int ret = srv->util_pre_resume();

		if (ret != 0)
			return ret;
	}

	return vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			  4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
			  dev->channel);
}
static const struct hv_vmbus_device_id id_table[] = { static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */ /* Shutdown guid */
{ HV_SHUTDOWN_GUID, { HV_SHUTDOWN_GUID,
...@@ -547,6 +593,8 @@ static struct hv_driver util_drv = { ...@@ -547,6 +593,8 @@ static struct hv_driver util_drv = {
.id_table = id_table, .id_table = id_table,
.probe = util_probe, .probe = util_probe,
.remove = util_remove, .remove = util_remove,
.suspend = util_suspend,
.resume = util_resume,
.driver = { .driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS, .probe_type = PROBE_PREFER_ASYNCHRONOUS,
}, },
...@@ -616,11 +664,23 @@ static int hv_timesync_init(struct hv_util_service *srv) ...@@ -616,11 +664,23 @@ static int hv_timesync_init(struct hv_util_service *srv)
return 0; return 0;
} }
/* Cancel the pending time-adjustment work item, waiting for it to finish. */
static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}
/*
 * Called on hibernation, before util_suspend() closes the VMBus channel:
 * make sure no time-adjustment work is still running.
 *
 * Return: always 0.
 */
static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}
static void hv_timesync_deinit(void) static void hv_timesync_deinit(void)
{ {
if (hv_ptp_clock) if (hv_ptp_clock)
ptp_clock_unregister(hv_ptp_clock); ptp_clock_unregister(hv_ptp_clock);
cancel_work_sync(&adj_time_work);
hv_timesync_cancel_work();
} }
static int __init init_hyperv_utils(void) static int __init init_hyperv_utils(void)
......
...@@ -352,14 +352,20 @@ void vmbus_on_msg_dpc(unsigned long data); ...@@ -352,14 +352,20 @@ void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *srv); int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void); void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context); void hv_kvp_onchannelcallback(void *context);
int hv_vss_init(struct hv_util_service *srv); int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void); void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context); void hv_vss_onchannelcallback(void *context);
int hv_fcopy_init(struct hv_util_service *srv); int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void); void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context); void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash); void vmbus_initiate_unload(bool crash);
......
...@@ -1435,6 +1435,8 @@ struct hv_util_service { ...@@ -1435,6 +1435,8 @@ struct hv_util_service {
void (*util_cb)(void *); void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *); int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void); void (*util_deinit)(void);
int (*util_pre_suspend)(void);
int (*util_pre_resume)(void);
}; };
struct vmbuspipe_hdr { struct vmbuspipe_hdr {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment