Commit e5d2f910 authored by Dexuan Cui, committed by David S. Miller

PCI: hv: Add a paravirtual backchannel in software

Windows SR-IOV provides a backchannel mechanism in software for communication
between a VF driver and a PF driver.  These "configuration blocks" are
similar in concept to PCI configuration space, but instead of doing reads and
writes in 32-bit chunks through a very slow path, packets of up to 128 bytes
can be sent or received asynchronously.

Nearly every SR-IOV device contains just such a communications channel in
hardware, so using this one in software is usually optional.  Using the
software channel, however, allows driver implementers to leverage software
tools that fuzz the communications channel looking for vulnerabilities.

The usage model for these packets puts the responsibility for reading or
writing on the VF driver.  The VF driver sends a read or a write packet,
indicating which "block" is being referred to by number.

If the PF driver wishes to initiate communication, it can "invalidate" one or
more of the first 64 blocks.  This invalidation is delivered via a callback
supplied by the VF driver to this driver.

No protocol is implied, except that supplied by the PF and VF drivers.
Signed-off-by: Jake Oshins <jakeo@microsoft.com>
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fed07ef3
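
For context before the diff, here is a minimal sketch of how a VF driver might consume the read side of the new API. The block number, structure layout, and every my_* name are illustrative assumptions and are not part of this patch; only hv_read_config_block() and HV_CONFIG_BLOCK_SIZE_MAX come from the interface added below.

#include <linux/pci.h>
#include <linux/hyperv.h>

#define MY_CAPS_BLOCK_ID	0	/* assumed block number, defined by the PF driver */

struct my_caps {			/* assumed layout, must be <= 128 bytes */
	u32 version;
	u32 features;
};

/* Read block 0 from the PF driver and sanity-check the returned length. */
static int my_vf_query_caps(struct pci_dev *pdev, struct my_caps *caps)
{
	unsigned int bytes_returned;
	int ret;

	ret = hv_read_config_block(pdev, caps, sizeof(*caps),
				   MY_CAPS_BLOCK_ID, &bytes_returned);
	if (ret)
		return ret;

	/* The PF decides how many bytes come back; demand a full structure. */
	if (bytes_returned < sizeof(*caps))
		return -EINVAL;

	return 0;
}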
@@ -365,6 +365,39 @@ struct pci_delete_interrupt {
	struct tran_int_desc int_desc;
} __packed;
/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;
struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;

@@ -499,6 +532,9 @@ struct hv_pci_dev {
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.

@@ -817,6 +853,256 @@ static struct pci_ops hv_pcifront_ops = {
	.write = hv_pcifront_write_config,
};
/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver.  These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional.  Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver.  The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one
 * or more of the first 64 blocks.  This invalidation is delivered via a
 * callback supplied by the VF driver to this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */

struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};
/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context: Identifies the read config operation
 * @resp: The response packet itself
 * @resp_packet_size: Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}
/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev: The PCI driver's representation for this device.
 * @buf: Buffer into which the config block will be copied.
 * @len: Size in bytes of buf.
 * @block_id: Identifies the config block which has been requested.
 * @bytes_returned: Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
			 unsigned int block_id, unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}
EXPORT_SYMBOL(hv_read_config_block);
/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context: Identifies the write config operation
 * @resp: The response packet itself
 * @resp_packet_size: Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev: The PCI driver's representation for this device.
 * @buf: Buffer from which the config block will be copied.
 * @len: Size in bytes of buf.
 * @block_id: Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
			  unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL(hv_write_config_block);
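
The write path is used the same way from the VF side. A hypothetical caller follows; the command structure and block number are assumptions, not part of this patch.

#define MY_CMD_BLOCK_ID	1	/* assumed block number, defined by the PF driver */

struct my_cmd {			/* assumed layout, must be <= 128 bytes */
	u32 opcode;
	u32 arg;
};

/*
 * Push one command block to the PF driver. hv_write_config_block() sleeps
 * until the host completes the request or wait_for_response() fails.
 */
static int my_vf_send_cmd(struct pci_dev *pdev, struct my_cmd *cmd)
{
	return hv_write_config_block(pdev, cmd, sizeof(*cmd),
				     MY_CMD_BLOCK_ID);
}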
/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev: The PCI driver's representation for this device.
 * @context: Identifies the device.
 * @block_invalidate: Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
				 void (*block_invalidate)(void *context,
							  u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}
EXPORT_SYMBOL(hv_register_block_invalidate);
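
The callback registered here is invoked directly from hv_pci_onchannelcallback() (see the PCI_INVALIDATE_BLOCK case below), so a VF driver would typically just record the mask and defer the actual re-read. A sketch under that assumption; every my_* name is hypothetical and locking/error handling are omitted for brevity.

#include <linux/pci.h>
#include <linux/hyperv.h>
#include <linux/workqueue.h>
#include <linux/bits.h>

struct my_vf_priv {
	struct pci_dev *pdev;
	struct work_struct inval_work;
	u64 pending_mask;		/* locking omitted for brevity */
};

/* Runs in process context; safe to issue blocking backchannel reads here. */
static void my_vf_inval_work(struct work_struct *work)
{
	struct my_vf_priv *priv =
		container_of(work, struct my_vf_priv, inval_work);
	u8 block[HV_CONFIG_BLOCK_SIZE_MAX];
	unsigned int bytes_returned;
	u64 mask = priv->pending_mask;

	priv->pending_mask = 0;

	/* Re-read whichever of the first 64 blocks the PF invalidated. */
	if (mask & BIT_ULL(0))
		hv_read_config_block(priv->pdev, block, sizeof(block), 0,
				     &bytes_returned);
}

/* Called from the VMBus channel callback: only record the mask and defer. */
static void my_vf_block_invalidate(void *context, u64 block_mask)
{
	struct my_vf_priv *priv = context;

	priv->pending_mask |= block_mask;
	schedule_work(&priv->inval_work);
}

static int my_vf_setup_backchannel(struct my_vf_priv *priv)
{
	INIT_WORK(&priv->inval_work, my_vf_inval_work);
	return hv_register_block_invalidate(priv->pdev, priv,
					    my_vf_block_invalidate);
}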
/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)

@@ -1968,6 +2254,7 @@ static void hv_pci_onchannelcallback(void *context)
	struct pci_response *response;
	struct pci_incoming_message *new_message;
	struct pci_bus_relations *bus_rel;
	struct pci_dev_inval_block *inval;
	struct pci_dev_incoming *dev_message;
	struct hv_pci_dev *hpdev;

@@ -2045,6 +2332,21 @@ static void hv_pci_onchannelcallback(void *context)
				}
				break;
			case PCI_INVALIDATE_BLOCK:

				inval = (struct pci_dev_inval_block *)buffer;
				hpdev = get_pcichild_wslot(hbus,
							   inval->wslot.slot);
				if (hpdev) {
					if (hpdev->block_invalidate) {
						hpdev->block_invalidate(
						    hpdev->invalidate_context,
						    inval->block_mask);
					}

					put_pcichild(hpdev);
				}
				break;
			default:
				dev_warn(&hbus->hdev->device,
					 "Unimplemented protocol message %x\n",
@@ -1578,4 +1578,19 @@ hv_pkt_iter_next(struct vmbus_channel *channel,
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	     pkt = hv_pkt_iter_next(channel, pkt))
/*
 * Functions for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid", which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hv_read_config_block(struct pci_dev *dev, void *buf, unsigned int buf_len,
			 unsigned int block_id, unsigned int *bytes_returned);
int hv_write_config_block(struct pci_dev *dev, void *buf, unsigned int len,
			  unsigned int block_id);
int hv_register_block_invalidate(struct pci_dev *dev, void *context,
				 void (*block_invalidate)(void *context,
							  u64 block_mask));
#endif /* _HYPERV_H */