Commit 05e63c6a authored by Maxime Ripard

Merge branch 'virtio-shm' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse into drm-misc-next

Topic pull request for core virtio changes that will be required by the DRM
driver.
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
From: Gurchetan Singh <gurchetansingh@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/CAAfnVBn2BzXWFY3hhjDxd5q0P2_JWn-HdkVxgS94x9keAUZiow@mail.gmail.com
parents 2f0ddd89 38e89548
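
As a rough illustration of how a consumer of this topic branch might use the new helper (not part of the series: example_map_shm() and EXAMPLE_SHM_REGION_ID are hypothetical placeholders, and devm_memremap() is only one plausible way to map the range; a real driver would use the region id defined by its device specification):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical region id; a real driver uses the id from its device spec. */
#define EXAMPLE_SHM_REGION_ID	0

/* Look up shared memory region EXAMPLE_SHM_REGION_ID and memremap it. */
static void *example_map_shm(struct virtio_device *vdev, u64 *len)
{
	struct virtio_shm_region shm;

	/*
	 * virtio_get_shm_region() returns false if the transport has no
	 * get_shm_region op or the device does not expose a region with
	 * the requested id.
	 */
	if (!virtio_get_shm_region(vdev, &shm, EXAMPLE_SHM_REGION_ID))
		return ERR_PTR(-ENODEV);

	*len = shm.len;

	/* shm.addr is a host-physical base address; map it before use. */
	return devm_memremap(&vdev->dev, shm.addr, shm.len, MEMREMAP_WB);
}

The same call works on both transports touched below, since virtio-pci and virtio-mmio each gain a .get_shm_region implementation.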
@@ -498,6 +498,36 @@ static const char *vm_bus_name(struct virtio_device *vdev)
 	return vm_dev->pdev->name;
 }
 
+static bool vm_get_shm_region(struct virtio_device *vdev,
+			      struct virtio_shm_region *region, u8 id)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	u64 len, addr;
+
+	/* Select the region we're interested in */
+	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);
+
+	/* Read the region size */
+	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
+	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;
+
+	region->len = len;
+
+	/* Check if region length is -1. If that's the case, the shared memory
+	 * region does not exist and there is no need to proceed further.
+	 */
+	if (len == ~(u64)0)
+		return false;
+
+	/* Read the region base address */
+	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
+	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;
+
+	region->addr = addr;
+
+	return true;
+}
+
 static const struct virtio_config_ops virtio_mmio_config_ops = {
 	.get = vm_get,
 	.set = vm_set,
@@ -510,6 +540,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
 	.get_features = vm_get_features,
 	.finalize_features = vm_finalize_features,
 	.bus_name = vm_bus_name,
+	.get_shm_region = vm_get_shm_region,
 };
...
@@ -444,6 +444,99 @@ static void del_vq(struct virtio_pci_vq_info *info)
 	vring_del_virtqueue(vq);
 }
 
+static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
+				   u8 *bar, u64 *offset, u64 *len)
+{
+	int pos;
+
+	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
+	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
+		u8 type, cap_len, id;
+		u32 tmp32;
+		u64 res_offset, res_length;
+
+		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+							 cfg_type), &type);
+		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
+			continue;
+
+		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+							 cap_len), &cap_len);
+		if (cap_len != sizeof(struct virtio_pci_cap64)) {
+			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
+				" %d size: %d\n", __func__, pos, cap_len);
+			continue;
+		}
+
+		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+							 id), &id);
+		if (id != required_id)
+			continue;
+
+		/* Type, and ID match, looks good */
+		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+							 bar), bar);
+
+		/* Read the lower 32bit of length and offset */
+		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
+							  offset), &tmp32);
+		res_offset = tmp32;
+		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
+							  length), &tmp32);
+		res_length = tmp32;
+
+		/* and now the top half */
+		pci_read_config_dword(dev,
+				      pos + offsetof(struct virtio_pci_cap64,
+						     offset_hi), &tmp32);
+		res_offset |= ((u64)tmp32) << 32;
+		pci_read_config_dword(dev,
+				      pos + offsetof(struct virtio_pci_cap64,
+						     length_hi), &tmp32);
+		res_length |= ((u64)tmp32) << 32;
+
+		*offset = res_offset;
+		*len = res_length;
+
+		return pos;
+	}
+	return 0;
+}
+
+static bool vp_get_shm_region(struct virtio_device *vdev,
+			      struct virtio_shm_region *region, u8 id)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct pci_dev *pci_dev = vp_dev->pci_dev;
+	u8 bar;
+	u64 offset, len;
+	phys_addr_t phys_addr;
+	size_t bar_len;
+
+	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
+		return false;
+
+	phys_addr = pci_resource_start(pci_dev, bar);
+	bar_len = pci_resource_len(pci_dev, bar);
+
+	if ((offset + len) < offset) {
+		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
+			__func__);
+		return false;
+	}
+
+	if (offset + len > bar_len) {
+		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
+			__func__);
+		return false;
+	}
+
+	region->len = len;
+	region->addr = (u64) phys_addr + offset;
+
+	return true;
+}
+
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 	.get = NULL,
 	.set = NULL,
@@ -458,6 +551,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 	.bus_name = vp_bus_name,
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
+	.get_shm_region = vp_get_shm_region,
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -474,6 +568,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
 	.bus_name = vp_bus_name,
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
+	.get_shm_region = vp_get_shm_region,
 };
 
 /**
...
@@ -11,6 +11,11 @@
 struct irq_affinity;
 
+struct virtio_shm_region {
+	u64 addr;
+	u64 len;
+};
+
 /**
  * virtio_config_ops - operations for configuring a virtio device
  * Note: Do not assume that a transport implements all of the operations
@@ -66,6 +71,7 @@ struct irq_affinity;
  *	the caller can then copy.
  * @set_vq_affinity: set the affinity for a virtqueue (optional).
  * @get_vq_affinity: get the affinity for a virtqueue (optional).
+ * @get_shm_region: get a shared memory region based on the index.
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
@@ -89,6 +95,8 @@ struct virtio_config_ops {
 			const struct cpumask *cpu_mask);
 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
 			int index);
+	bool (*get_shm_region)(struct virtio_device *vdev,
+			       struct virtio_shm_region *region, u8 id);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
@@ -251,6 +259,15 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 	return 0;
 }
 
+static inline
+bool virtio_get_shm_region(struct virtio_device *vdev,
+			   struct virtio_shm_region *region, u8 id)
+{
+	if (!vdev->config->get_shm_region)
+		return false;
+	return vdev->config->get_shm_region(vdev, region, id);
+}
+
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
 {
 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
...
@@ -122,6 +122,17 @@
 #define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
 #define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
 
+/* Shared memory region id */
+#define VIRTIO_MMIO_SHM_SEL 0x0ac
+
+/* Shared memory region length, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0
+#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4
+
+/* Shared memory region base address, 64 bits in two halves */
+#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8
+#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc
+
 /* Configuration atomicity value */
 #define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
...
@@ -113,6 +113,8 @@
 #define VIRTIO_PCI_CAP_DEVICE_CFG 4
 /* PCI configuration access */
 #define VIRTIO_PCI_CAP_PCI_CFG 5
+/* Additional shared memory capability */
+#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
 
 /* This is the PCI capability header: */
 struct virtio_pci_cap {
@@ -121,11 +123,18 @@ struct virtio_pci_cap {
 	__u8 cap_len; /* Generic PCI field: capability length */
 	__u8 cfg_type; /* Identifies the structure. */
 	__u8 bar; /* Where to find it. */
-	__u8 padding[3]; /* Pad to full dword. */
+	__u8 id; /* Multiple capabilities of the same type */
+	__u8 padding[2]; /* Pad to full dword. */
 	__le32 offset; /* Offset within bar. */
 	__le32 length; /* Length of the structure, in bytes. */
 };
 
+struct virtio_pci_cap64 {
+	struct virtio_pci_cap cap;
+	__le32 offset_hi; /* Most sig 32 bits of offset */
+	__le32 length_hi; /* Most sig 32 bits of length */
+};
+
 struct virtio_pci_notify_cap {
 	struct virtio_pci_cap cap;
 	__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
...