Commit 407bc8d8 authored by Linus Torvalds

Merge tag 'vfio-v5.9-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Inclusive naming updates (Alex Williamson)

 - Intel X550 INTx quirk (Alex Williamson)

 - Error path resched between unmaps (Xiang Zheng)

 - SPAPR IOMMU pin_user_pages() conversion (John Hubbard)

 - Trivial mutex simplification (Alex Williamson)

 - QAT device denylist (Giovanni Cabiddu)

 - type1 IOMMU ioctl refactor (Liu Yi L)

* tag 'vfio-v5.9-rc1' of git://github.com/awilliam/linux-vfio:
  vfio/type1: Refactor vfio_iommu_type1_ioctl()
  vfio/pci: Add QAT devices to denylist
  vfio/pci: Add device denylist
  PCI: Add Intel QuickAssist device IDs
  vfio/pci: Hold igate across releasing eventfd contexts
  vfio/spapr_tce: convert get_user_pages() --> pin_user_pages()
  vfio/type1: Add conditional rescheduling after iommu map failed
  vfio/pci: Add Intel X550 to hidden INTx devices
  vfio: Cleanup allowed driver naming
parents ea6ec774 ccd59dce
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -60,6 +60,10 @@ module_param(enable_sriov, bool, 0644);
 MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF.");
 #endif
 
+static bool disable_denylist;
+module_param(disable_denylist, bool, 0444);
+MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
+
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
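
Because disable_denylist is registered with mode 0444, it is read-only at runtime and set at load time (e.g. modprobe vfio-pci disable_denylist=1). A minimal userspace sketch to read it back, assuming the standard module parameter path; kernel bool parameters read as 'Y' or 'N':

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char v;
	int fd = open("/sys/module/vfio_pci/parameters/disable_denylist",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, &v, 1) == 1)
		printf("disable_denylist: %c\n", v);	/* 'Y' or 'N' */
	close(fd);
	return 0;
}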
@@ -69,6 +73,44 @@ static inline bool vfio_vga_disabled(void)
 #endif
 }
 
+static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+{
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_INTEL:
+		switch (pdev->device) {
+		case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+		case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+		case PCI_DEVICE_ID_INTEL_QAT_C62X:
+		case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+			return true;
+		default:
+			return false;
+		}
+	}
+
+	return false;
+}
+
+static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
+{
+	if (!vfio_pci_dev_in_denylist(pdev))
+		return false;
+
+	if (disable_denylist) {
+		pci_warn(pdev,
+			 "device denylist disabled - allowing device %04x:%04x.\n",
+			 pdev->vendor, pdev->device);
+		return false;
+	}
+
+	pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
+		 pdev->vendor, pdev->device);
+	return true;
+}
+
 /*
  * Our VGA arbiter participation is limited since we don't know anything
  * about the device itself. However, if the device is the only VGA device
@@ -207,6 +249,8 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
 		case 0x1580 ... 0x1581:
 		case 0x1583 ... 0x158b:
 		case 0x37d0 ... 0x37d2:
+		/* X550 */
+		case 0x1563:
 			return true;
 		default:
 			return false;
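
For a device on this nointx list, vfio-pci hides INTx entirely, so userspace should see an interrupt count of zero when it queries the INTx index. A hedged sketch against the standard VFIO device API; the fd is assumed to be an X550 device already opened through its VFIO group:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Returns the INTx interrupt count for an open VFIO device fd;
 * 0 is expected for a device with hidden INTx, such as the X550. */
int intx_count(int device_fd)
{
	struct vfio_irq_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);
	info.index = VFIO_PCI_INTX_IRQ_INDEX;

	if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info))
		return -1;
	return info.count;
}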
@@ -521,14 +565,12 @@ static void vfio_pci_release(void *device_data)
 		vfio_pci_vf_token_user_add(vdev, -1);
 		vfio_spapr_pci_eeh_release(vdev->pdev);
 		vfio_pci_disable(vdev);
+
 		mutex_lock(&vdev->igate);
 		if (vdev->err_trigger) {
 			eventfd_ctx_put(vdev->err_trigger);
 			vdev->err_trigger = NULL;
 		}
-		mutex_unlock(&vdev->igate);
-
-		mutex_lock(&vdev->igate);
 		if (vdev->req_trigger) {
 			eventfd_ctx_put(vdev->req_trigger);
 			vdev->req_trigger = NULL;
@@ -1856,6 +1898,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct iommu_group *group;
 	int ret;
 
+	if (vfio_pci_is_denylisted(pdev))
+		return -EINVAL;
+
 	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
 		return -EINVAL;
@@ -2345,6 +2390,9 @@ static int __init vfio_pci_init(void)
 	vfio_pci_fill_ids();
 
+	if (disable_denylist)
+		pr_warn("device denylist disabled.\n");
+
 	return 0;
 
 out_driver:
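
End to end, the denylist means a bind attempt on a listed QAT device now fails at probe time with -EINVAL unless disable_denylist is set. A rough userspace illustration, assuming the device has already been pointed at vfio-pci via new_id or driver_override; the BDF is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *bdf = "0000:3d:00.0";	/* placeholder QAT device address */
	int fd = open("/sys/bus/pci/drivers/vfio-pci/bind", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, bdf, strlen(bdf)) < 0)
		perror("bind");	/* expected: EINVAL from vfio_pci_probe() */
	close(fd);
	return 0;
}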
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -627,9 +627,10 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
  * that error notification via MSI can be affected for platforms that handle
  * MSI within the same IOVA space as DMA.
  */
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_allowed[] = { "pci-stub" };
 
-static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
+static bool vfio_dev_driver_allowed(struct device *dev,
+				    struct device_driver *drv)
 {
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
@@ -638,8 +639,8 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
 			return true;
 	}
 
-	return match_string(vfio_driver_whitelist,
-			    ARRAY_SIZE(vfio_driver_whitelist),
+	return match_string(vfio_driver_allowed,
+			    ARRAY_SIZE(vfio_driver_allowed),
 			    drv->name) >= 0;
 }
@@ -648,7 +649,7 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
  * one of the following states:
  *  - driver-less
  *  - bound to a vfio driver
- *  - bound to a whitelisted driver
+ *  - bound to an otherwise allowed driver
  *  - a PCI interconnect device
  *
  * We use two methods to determine whether a device is bound to a vfio
@@ -674,7 +675,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
 	}
 	mutex_unlock(&group->unbound_lock);
 
-	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
+	if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
 		return 0;
 
 	device = vfio_group_get_device(group, dev);
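
The rename does not change behavior: match_string() returns the index of the matching array entry or -EINVAL, so the ">= 0" test simply asks "is this driver name in the allowed list". A standalone kernel-style sketch of the same idiom, with hypothetical names:

static const char * const allowed[] = { "pci-stub" };

/* true if @name appears in allowed[]; match_string() returns the
 * matching index on success, -EINVAL otherwise. */
static bool driver_name_allowed(const char *name)
{
	return match_string(allowed, ARRAY_SIZE(allowed), name) >= 0;
}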
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -383,7 +383,7 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 	struct page *page;
 
 	page = pfn_to_page(hpa >> PAGE_SHIFT);
-	put_page(page);
+	unpin_user_page(page);
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
@@ -486,7 +486,7 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
 	struct page *page = NULL;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
-	if (get_user_pages_fast(tce & PAGE_MASK, 1,
+	if (pin_user_pages_fast(tce & PAGE_MASK, 1,
 			direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
 			&page) != 1)
 		return -EFAULT;
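
Both hunks follow the FOLL_PIN conversion rule: a page taken with pin_user_pages_fast() must be released with unpin_user_page(), never put_page(). A minimal sketch of the pairing (hypothetical helper, assumed to run in process context):

static int pin_one_page(unsigned long uaddr, bool writable)
{
	struct page *page;

	if (pin_user_pages_fast(uaddr & PAGE_MASK, 1,
				writable ? FOLL_WRITE : 0, &page) != 1)
		return -EFAULT;

	/* ... DMA to/from the page ... */

	unpin_user_page(page);	/* pairs with pin_user_pages_fast() */
	return 0;
}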
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1225,8 +1225,10 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 	return 0;
 
 unwind:
-	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
 		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
+		cond_resched();
+	}
 
 	return ret;
 }
@@ -2453,6 +2455,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
 	return ret;
 }
 
+static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+					    unsigned long arg)
+{
+	switch (arg) {
+	case VFIO_TYPE1_IOMMU:
+	case VFIO_TYPE1v2_IOMMU:
+	case VFIO_TYPE1_NESTING_IOMMU:
+		return 1;
+	case VFIO_DMA_CC_IOMMU:
+		if (!iommu)
+			return 0;
+		return vfio_domains_have_iommu_cache(iommu);
+	default:
+		return 0;
+	}
+}
+
 static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
 		struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
 		size_t size)
@@ -2529,241 +2548,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
 	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
 }
 
-static long vfio_iommu_type1_ioctl(void *iommu_data,
-				   unsigned int cmd, unsigned long arg)
+static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
+				     unsigned long arg)
 {
-	struct vfio_iommu *iommu = iommu_data;
+	struct vfio_iommu_type1_info info;
 	unsigned long minsz;
+	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+	unsigned long capsz;
+	int ret;
 
-	if (cmd == VFIO_CHECK_EXTENSION) {
-		switch (arg) {
-		case VFIO_TYPE1_IOMMU:
-		case VFIO_TYPE1v2_IOMMU:
-		case VFIO_TYPE1_NESTING_IOMMU:
-			return 1;
-		case VFIO_DMA_CC_IOMMU:
-			if (!iommu)
-				return 0;
-			return vfio_domains_have_iommu_cache(iommu);
-		default:
-			return 0;
-		}
-	} else if (cmd == VFIO_IOMMU_GET_INFO) {
-		struct vfio_iommu_type1_info info;
-		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-		unsigned long capsz;
-		int ret;
-
-		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+	minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
 
-		/* For backward compatibility, cannot require this */
-		capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+	/* For backward compatibility, cannot require this */
+	capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
 
-		if (copy_from_user(&info, (void __user *)arg, minsz))
-			return -EFAULT;
+	if (copy_from_user(&info, (void __user *)arg, minsz))
+		return -EFAULT;
 
-		if (info.argsz < minsz)
-			return -EINVAL;
+	if (info.argsz < minsz)
+		return -EINVAL;
 
-		if (info.argsz >= capsz) {
-			minsz = capsz;
-			info.cap_offset = 0; /* output, no-recopy necessary */
-		}
+	if (info.argsz >= capsz) {
+		minsz = capsz;
+		info.cap_offset = 0; /* output, no-recopy necessary */
+	}
 
-		mutex_lock(&iommu->lock);
-		info.flags = VFIO_IOMMU_INFO_PGSIZES;
+	mutex_lock(&iommu->lock);
+	info.flags = VFIO_IOMMU_INFO_PGSIZES;
 
-		info.iova_pgsizes = iommu->pgsize_bitmap;
+	info.iova_pgsizes = iommu->pgsize_bitmap;
 
-		ret = vfio_iommu_migration_build_caps(iommu, &caps);
+	ret = vfio_iommu_migration_build_caps(iommu, &caps);
 
-		if (!ret)
-			ret = vfio_iommu_iova_build_caps(iommu, &caps);
+	if (!ret)
+		ret = vfio_iommu_iova_build_caps(iommu, &caps);
 
-		mutex_unlock(&iommu->lock);
+	mutex_unlock(&iommu->lock);
 
-		if (ret)
-			return ret;
+	if (ret)
+		return ret;
 
-		if (caps.size) {
-			info.flags |= VFIO_IOMMU_INFO_CAPS;
+	if (caps.size) {
+		info.flags |= VFIO_IOMMU_INFO_CAPS;
 
-			if (info.argsz < sizeof(info) + caps.size) {
-				info.argsz = sizeof(info) + caps.size;
-			} else {
-				vfio_info_cap_shift(&caps, sizeof(info));
-				if (copy_to_user((void __user *)arg +
-						sizeof(info), caps.buf,
-						caps.size)) {
-					kfree(caps.buf);
-					return -EFAULT;
-				}
-				info.cap_offset = sizeof(info);
+		if (info.argsz < sizeof(info) + caps.size) {
+			info.argsz = sizeof(info) + caps.size;
+		} else {
+			vfio_info_cap_shift(&caps, sizeof(info));
+			if (copy_to_user((void __user *)arg +
+					 sizeof(info), caps.buf,
+					 caps.size)) {
+				kfree(caps.buf);
+				return -EFAULT;
 			}
-
-			kfree(caps.buf);
+			info.cap_offset = sizeof(info);
 		}
 
-		return copy_to_user((void __user *)arg, &info, minsz) ?
-			-EFAULT : 0;
+		kfree(caps.buf);
+	}
 
-	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
-		struct vfio_iommu_type1_dma_map map;
-		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-				VFIO_DMA_MAP_FLAG_WRITE;
+	return copy_to_user((void __user *)arg, &info, minsz) ?
+			-EFAULT : 0;
+}
 
-		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
+				    unsigned long arg)
+{
+	struct vfio_iommu_type1_dma_map map;
+	unsigned long minsz;
+	uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 
-		if (copy_from_user(&map, (void __user *)arg, minsz))
-			return -EFAULT;
+	minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
 
-		if (map.argsz < minsz || map.flags & ~mask)
-			return -EINVAL;
+	if (copy_from_user(&map, (void __user *)arg, minsz))
+		return -EFAULT;
 
-		return vfio_dma_do_map(iommu, &map);
+	if (map.argsz < minsz || map.flags & ~mask)
+		return -EINVAL;
 
-	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
-		struct vfio_iommu_type1_dma_unmap unmap;
-		struct vfio_bitmap bitmap = { 0 };
-		int ret;
+	return vfio_dma_do_map(iommu, &map);
+}
 
-		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
+static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
+				      unsigned long arg)
+{
+	struct vfio_iommu_type1_dma_unmap unmap;
+	struct vfio_bitmap bitmap = { 0 };
+	unsigned long minsz;
+	int ret;
 
-		if (copy_from_user(&unmap, (void __user *)arg, minsz))
-			return -EFAULT;
+	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
 
-		if (unmap.argsz < minsz ||
-		    unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
-			return -EINVAL;
+	if (copy_from_user(&unmap, (void __user *)arg, minsz))
+		return -EFAULT;
 
-		if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
-			unsigned long pgshift;
+	if (unmap.argsz < minsz ||
+	    unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
+		return -EINVAL;
 
-			if (unmap.argsz < (minsz + sizeof(bitmap)))
-				return -EINVAL;
+	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+		unsigned long pgshift;
 
-			if (copy_from_user(&bitmap,
-					   (void __user *)(arg + minsz),
-					   sizeof(bitmap)))
-				return -EFAULT;
+		if (unmap.argsz < (minsz + sizeof(bitmap)))
+			return -EINVAL;
 
-			if (!access_ok((void __user *)bitmap.data, bitmap.size))
-				return -EINVAL;
+		if (copy_from_user(&bitmap,
+				   (void __user *)(arg + minsz),
+				   sizeof(bitmap)))
+			return -EFAULT;
 
-			pgshift = __ffs(bitmap.pgsize);
-			ret = verify_bitmap_size(unmap.size >> pgshift,
-						 bitmap.size);
-			if (ret)
-				return ret;
-		}
+		if (!access_ok((void __user *)bitmap.data, bitmap.size))
+			return -EINVAL;
 
-		ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
-		if (ret)
-			return ret;
+		pgshift = __ffs(bitmap.pgsize);
+		ret = verify_bitmap_size(unmap.size >> pgshift,
+					 bitmap.size);
+		if (ret)
+			return ret;
+	}
 
-		return copy_to_user((void __user *)arg, &unmap, minsz) ?
+	ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+	if (ret)
+		return ret;
+
+	return copy_to_user((void __user *)arg, &unmap, minsz) ?
 			-EFAULT : 0;
-	} else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
-		struct vfio_iommu_type1_dirty_bitmap dirty;
-		uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
-				VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
-				VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
-		int ret = 0;
+}
 
-		if (!iommu->v2)
-			return -EACCES;
+static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
+					unsigned long arg)
+{
+	struct vfio_iommu_type1_dirty_bitmap dirty;
+	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+	unsigned long minsz;
+	int ret = 0;
 
-		minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
-				    flags);
+	if (!iommu->v2)
+		return -EACCES;
 
-		if (copy_from_user(&dirty, (void __user *)arg, minsz))
-			return -EFAULT;
+	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
 
-		if (dirty.argsz < minsz || dirty.flags & ~mask)
-			return -EINVAL;
+	if (copy_from_user(&dirty, (void __user *)arg, minsz))
+		return -EFAULT;
 
-		/* only one flag should be set at a time */
-		if (__ffs(dirty.flags) != __fls(dirty.flags))
-			return -EINVAL;
+	if (dirty.argsz < minsz || dirty.flags & ~mask)
+		return -EINVAL;
 
-		if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
-			size_t pgsize;
+	/* only one flag should be set at a time */
+	if (__ffs(dirty.flags) != __fls(dirty.flags))
+		return -EINVAL;
 
-			mutex_lock(&iommu->lock);
-			pgsize = 1 << __ffs(iommu->pgsize_bitmap);
-			if (!iommu->dirty_page_tracking) {
-				ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
-				if (!ret)
-					iommu->dirty_page_tracking = true;
-			}
-			mutex_unlock(&iommu->lock);
-			return ret;
-		} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
-			mutex_lock(&iommu->lock);
-			if (iommu->dirty_page_tracking) {
-				iommu->dirty_page_tracking = false;
-				vfio_dma_bitmap_free_all(iommu);
-			}
-			mutex_unlock(&iommu->lock);
-			return 0;
-		} else if (dirty.flags &
-				 VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
-			struct vfio_iommu_type1_dirty_bitmap_get range;
-			unsigned long pgshift;
-			size_t data_size = dirty.argsz - minsz;
-			size_t iommu_pgsize;
+	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+		size_t pgsize;
 
-			if (!data_size || data_size < sizeof(range))
-				return -EINVAL;
+		mutex_lock(&iommu->lock);
+		pgsize = 1 << __ffs(iommu->pgsize_bitmap);
+		if (!iommu->dirty_page_tracking) {
+			ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+			if (!ret)
+				iommu->dirty_page_tracking = true;
+		}
+		mutex_unlock(&iommu->lock);
+		return ret;
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+		mutex_lock(&iommu->lock);
+		if (iommu->dirty_page_tracking) {
+			iommu->dirty_page_tracking = false;
+			vfio_dma_bitmap_free_all(iommu);
+		}
+		mutex_unlock(&iommu->lock);
+		return 0;
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+		struct vfio_iommu_type1_dirty_bitmap_get range;
+		unsigned long pgshift;
+		size_t data_size = dirty.argsz - minsz;
+		size_t iommu_pgsize;
 
-			if (copy_from_user(&range, (void __user *)(arg + minsz),
-					   sizeof(range)))
-				return -EFAULT;
+		if (!data_size || data_size < sizeof(range))
+			return -EINVAL;
 
-			if (range.iova + range.size < range.iova)
-				return -EINVAL;
+		if (copy_from_user(&range, (void __user *)(arg + minsz),
+				   sizeof(range)))
+			return -EFAULT;
 
-			if (!access_ok((void __user *)range.bitmap.data,
-				       range.bitmap.size))
-				return -EINVAL;
+		if (range.iova + range.size < range.iova)
+			return -EINVAL;
 
-			pgshift = __ffs(range.bitmap.pgsize);
-			ret = verify_bitmap_size(range.size >> pgshift,
-						 range.bitmap.size);
-			if (ret)
-				return ret;
+		if (!access_ok((void __user *)range.bitmap.data,
+			       range.bitmap.size))
+			return -EINVAL;
 
-			mutex_lock(&iommu->lock);
+		pgshift = __ffs(range.bitmap.pgsize);
+		ret = verify_bitmap_size(range.size >> pgshift,
+					 range.bitmap.size);
+		if (ret)
+			return ret;
 
-			iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+		mutex_lock(&iommu->lock);
 
-			/* allow only smallest supported pgsize */
-			if (range.bitmap.pgsize != iommu_pgsize) {
-				ret = -EINVAL;
-				goto out_unlock;
-			}
-			if (range.iova & (iommu_pgsize - 1)) {
-				ret = -EINVAL;
-				goto out_unlock;
-			}
-			if (!range.size || range.size & (iommu_pgsize - 1)) {
-				ret = -EINVAL;
-				goto out_unlock;
-			}
+		iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
 
-			if (iommu->dirty_page_tracking)
-				ret = vfio_iova_dirty_bitmap(range.bitmap.data,
-						iommu, range.iova, range.size,
-						range.bitmap.pgsize);
-			else
-				ret = -EINVAL;
-out_unlock:
-			mutex_unlock(&iommu->lock);
+		/* allow only smallest supported pgsize */
+		if (range.bitmap.pgsize != iommu_pgsize) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (range.iova & (iommu_pgsize - 1)) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (!range.size || range.size & (iommu_pgsize - 1)) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
 
-			return ret;
-		}
-	}
-
-	return -ENOTTY;
+		if (iommu->dirty_page_tracking)
+			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+						     iommu, range.iova,
+						     range.size,
+						     range.bitmap.pgsize);
+		else
+			ret = -EINVAL;
+out_unlock:
+		mutex_unlock(&iommu->lock);
+
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+static long vfio_iommu_type1_ioctl(void *iommu_data,
+				   unsigned int cmd, unsigned long arg)
+{
+	struct vfio_iommu *iommu = iommu_data;
+
+	switch (cmd) {
+	case VFIO_CHECK_EXTENSION:
+		return vfio_iommu_type1_check_extension(iommu, arg);
+	case VFIO_IOMMU_GET_INFO:
+		return vfio_iommu_type1_get_info(iommu, arg);
+	case VFIO_IOMMU_MAP_DMA:
+		return vfio_iommu_type1_map_dma(iommu, arg);
+	case VFIO_IOMMU_UNMAP_DMA:
+		return vfio_iommu_type1_unmap_dma(iommu, arg);
+	case VFIO_IOMMU_DIRTY_PAGES:
+		return vfio_iommu_type1_dirty_pages(iommu, arg);
+	default:
+		return -ENOTTY;
+	}
 }
 
 static int vfio_iommu_type1_register_notifier(void *iommu_data,
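
From userspace the refactored dispatch is invisible; the container ioctls behave exactly as before. A short sketch of the documented probe sequence that lands in vfio_iommu_type1_check_extension() above:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);

	if (container < 0)
		return 1;

	/* Handled by vfio_iommu_type1_check_extension(): returns 1
	 * if the type1 v2 IOMMU backend is available, 0 otherwise. */
	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU) != 1)
		return 1;

	close(container);
	return 0;
}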
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2661,6 +2661,8 @@
 #define PCI_DEVICE_ID_INTEL_80332_1	0x0332
 #define PCI_DEVICE_ID_INTEL_80333_0	0x0370
 #define PCI_DEVICE_ID_INTEL_80333_1	0x0372
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC	0x0435
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF	0x0443
 #define PCI_DEVICE_ID_INTEL_82375	0x0482
 #define PCI_DEVICE_ID_INTEL_82424	0x0483
 #define PCI_DEVICE_ID_INTEL_82378	0x0484
@@ -2710,6 +2712,8 @@
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI	0x1577
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE	0x1578
 #define PCI_DEVICE_ID_INTEL_80960_RP	0x1960
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX	0x19e2
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF	0x19e3
 #define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT	0x1a38
@@ -2926,6 +2930,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF7	0x3717
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8	0x3718
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9	0x3719
+#define PCI_DEVICE_ID_INTEL_QAT_C62X	0x37c8
+#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF	0x37c9
 #define PCI_DEVICE_ID_INTEL_ICH10_0	0x3a14
 #define PCI_DEVICE_ID_INTEL_ICH10_1	0x3a16
 #define PCI_DEVICE_ID_INTEL_ICH10_2	0x3a18