Commit 8e50d392 authored by Dave Jiang, committed by Vinod Koul

dmaengine: idxd: Add shared workqueue support

Add shared workqueue support, which includes support for Shared Virtual
Memory (SVM), also known as On Demand Paging (ODP). The shared workqueue
uses the ENQCMDS instruction in the kernel and responds with a retry status
if the workqueue is full. Shared workqueues only work when the IOMMU
provides PASID support.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Link: https://lore.kernel.org/r/160382007499.3911367.26043087963708134.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 212a93ca
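In short, the submission path now splits on the workqueue type: dedicated workqueues keep posting descriptors with MOVDIR64B (iosubmit_cmds512()), while shared workqueues submit with ENQCMDS (enqcmds()), which carries the PASID from the descriptor and can be rejected when the queue is full. Below is a minimal sketch of that split, paraphrased from the idxd_submit_desc() change in this patch; the function name idxd_submit_sketch and the simplified error handling are illustrative only, not part of the patch.

static int idxd_submit_sketch(struct idxd_wq *wq, struct idxd_desc *desc)
{
	void __iomem *portal = wq->portal +
			       idxd_get_wq_portal_offset(IDXD_PORTAL_LIMITED);

	/* Make descriptor writes visible before the device can read them. */
	wmb();

	if (wq_dedicated(wq)) {
		/* Dedicated WQ: posted MOVDIR64B write, cannot be rejected. */
		iosubmit_cmds512(portal, desc->hw, 1);
		return 0;
	}

	/*
	 * Shared WQ: ENQCMDS submits the descriptor (which carries the PASID
	 * set up in __get_desc()) and returns an error if the device rejects
	 * it, e.g. because the shared queue is full.
	 */
	return enqcmds(portal, desc->hw);
}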
@@ -296,6 +296,16 @@ config INTEL_IDXD
If unsure, say N.
# Config symbol that collects all the dependencies that's necessary to
# support shared virtual memory for the devices supported by idxd.
config INTEL_IDXD_SVM
bool "Accelerator Shared Virtual Memory Support"
depends on INTEL_IDXD
depends on INTEL_IOMMU_SVM
depends on PCI_PRI
depends on PCI_PASID
depends on PCI_IOV
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
......
@@ -11,6 +11,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
@@ -32,7 +33,9 @@ static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
struct idxd_user_context {
struct idxd_wq *wq;
struct task_struct *task;
unsigned int pasid;
unsigned int flags;
struct iommu_sva *sva;
};
enum idxd_cdev_cleanup {
@@ -75,6 +78,8 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev;
int rc = 0;
struct iommu_sva *sva;
unsigned int pasid;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -95,6 +100,34 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
if (device_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm, NULL);
if (IS_ERR(sva)) {
rc = PTR_ERR(sva);
dev_err(dev, "pasid allocation failed: %d\n", rc);
goto failed;
}
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
goto failed;
}
ctx->sva = sva;
ctx->pasid = pasid;
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
goto failed;
}
}
}
idxd_wq_get(wq);
mutex_unlock(&wq->wq_lock);
return 0;
@@ -111,13 +144,27 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int rc;
dev_dbg(dev, "%s called\n", __func__);
filep->private_data = NULL;
/* Wait for in-flight operations to complete. */
if (wq_shared(wq)) {
idxd_device_drain_pasid(idxd, ctx->pasid);
} else {
if (device_pasid_enabled(idxd)) {
/* The wq disable in the disable pasid function will drain the wq */
rc = idxd_wq_disable_pasid(wq);
if (rc < 0)
dev_err(dev, "wq disable pasid failed.\n");
} else {
idxd_wq_drain(wq);
}
}
if (ctx->sva)
iommu_sva_unbind_device(ctx->sva);
kfree(ctx);
mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
......
@@ -273,10 +273,9 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
start = pci_resource_start(pdev, IDXD_WQ_BAR);
start = start + wq->id * IDXD_PORTAL_SIZE;
-wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
-if (!wq->dportal)
+wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+if (!wq->portal)
return -ENOMEM;
-dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
return 0;
}
@@ -285,7 +284,61 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
struct device *dev = &wq->idxd->pdev->dev;
-devm_iounmap(dev, wq->dportal);
+devm_iounmap(dev, wq->portal);
}
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
int rc;
union wqcfg wqcfg;
unsigned int offset;
unsigned long flags;
rc = idxd_wq_disable(wq);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
spin_lock_irqsave(&idxd->dev_lock, flags);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
rc = idxd_wq_enable(wq);
if (rc < 0)
return rc;
return 0;
}
int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
int rc;
union wqcfg wqcfg;
unsigned int offset;
unsigned long flags;
rc = idxd_wq_disable(wq);
if (rc < 0)
return rc;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
spin_lock_irqsave(&idxd->dev_lock, flags);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 0;
wqcfg.pasid = 0;
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
spin_unlock_irqrestore(&idxd->dev_lock, flags);
rc = idxd_wq_enable(wq);
if (rc < 0)
return rc;
return 0;
}
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
@@ -468,6 +521,17 @@ void idxd_device_reset(struct idxd_device *idxd)
spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
struct device *dev = &idxd->pdev->dev;
u32 operand;
operand = pasid;
dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
dev_dbg(dev, "pasid %d drained\n", pasid);
}
/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
@@ -554,9 +618,21 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
/* byte 8-11 */
wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
if (device_pasid_enabled(idxd)) {
wq->wqcfg->pasid_en = 1;
if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
wq->wqcfg->pasid = idxd->pasid;
}
wq->wqcfg->priority = wq->priority;
if (idxd->hw.gen_cap.block_on_fault &&
test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
wq->wqcfg->bof = 1;
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
@@ -664,8 +740,8 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->size)
continue;
-if (!wq_dedicated(wq)) {
-dev_warn(dev, "No shared workqueue support.\n");
+if (wq_shared(wq) && !device_swq_supported(idxd)) {
+dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
......
@@ -61,8 +61,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
u64 addr_f1, u64 addr_f2, u64 len,
u64 compl, u32 flags)
{
-struct idxd_device *idxd = wq->idxd;
hw->flags = flags;
hw->opcode = opcode;
hw->src_addr = addr_f1;
@@ -70,13 +68,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->xfer_size = len;
hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
hw->completion_addr = compl;
-/*
- * Descriptor completion vectors are 1-8 for MSIX. We will round
- * robin through the 8 vectors.
- */
-wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
-hw->int_handle = wq->vec_ptr;
}
static struct dma_async_tx_descriptor *
......
@@ -59,6 +59,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
WQ_FLAG_BLOCK_ON_FAULT,
};
enum idxd_wq_type {
@@ -86,10 +87,11 @@ enum idxd_op_type {
enum idxd_complete_type {
IDXD_COMPLETE_NORMAL = 0,
IDXD_COMPLETE_ABORT,
IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_wq {
-void __iomem *dportal;
+void __iomem *portal;
struct device conf_dev;
struct idxd_cdev idxd_cdev;
struct idxd_device *idxd;
@@ -145,6 +147,7 @@ enum idxd_device_state {
enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING,
IDXD_FLAG_PASID_ENABLED,
};
struct idxd_device {
@@ -167,6 +170,9 @@ struct idxd_device {
struct idxd_wq *wqs;
struct idxd_engine *engines;
struct iommu_sva *sva;
unsigned int pasid;
int num_groups;
u32 msix_perm_offset;
@@ -215,11 +221,28 @@ struct idxd_desc {
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
static inline bool wq_shared(struct idxd_wq *wq)
{
return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
static inline bool device_swq_supported(struct idxd_device *idxd)
{
return (support_enqcmd && device_pasid_enabled(idxd));
}
enum idxd_portal_prot {
IDXD_PORTAL_UNLIMITED = 0,
IDXD_PORTAL_LIMITED,
@@ -288,6 +311,7 @@ void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
@@ -298,6 +322,8 @@ void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
......
@@ -14,6 +14,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
@@ -26,6 +28,8 @@ MODULE_AUTHOR("Intel Corporation");
#define DRV_NAME "idxd"
bool support_enqcmd;
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;
@@ -53,6 +57,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -131,6 +136,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
idxd_unmask_error_interrupts(idxd);
/* Setup MSIX permission table */
mperm.bits = 0;
mperm.pasid = idxd->pasid;
mperm.pasid_en = device_pasid_enabled(idxd);
for (i = 1; i < msixcnt; i++)
iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
return 0;
err_no_irq:
@@ -265,8 +277,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
-void __iomem * const *iomap)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
@@ -276,12 +287,45 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
return NULL;
idxd->pdev = pdev;
-idxd->reg_base = iomap[IDXD_MMIO_BAR];
spin_lock_init(&idxd->dev_lock);
return idxd;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
int flags;
unsigned int pasid;
struct iommu_sva *sva;
flags = SVM_FLAG_SUPERVISOR_MODE;
sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
if (IS_ERR(sva)) {
dev_warn(&idxd->pdev->dev,
"iommu sva bind failed: %ld\n", PTR_ERR(sva));
return PTR_ERR(sva);
}
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
return -ENODEV;
}
idxd->sva = sva;
idxd->pasid = pasid;
dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
return 0;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
iommu_sva_unbind_device(idxd->sva);
idxd->sva = NULL;
}
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -292,6 +336,14 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_device_init_reset(idxd);
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
rc = idxd_enable_system_pasid(idxd);
if (rc < 0)
dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
idxd_read_caps(idxd);
idxd_read_table_offsets(idxd);
@@ -322,29 +374,29 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_mask_error_interrupts(idxd);
idxd_mask_msix_vectors(idxd);
err_setup:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
return rc;
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
-void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
int rc;
-unsigned int mask;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
dev_dbg(dev, "Mapping BARs\n"); dev_dbg(dev, "Alloc IDXD context\n");
mask = (1 << IDXD_MMIO_BAR); idxd = idxd_alloc(pdev);
rc = pcim_iomap_regions(pdev, mask, DRV_NAME); if (!idxd)
if (rc) return -ENOMEM;
return rc;
iomap = pcim_iomap_table(pdev); dev_dbg(dev, "Mapping BARs\n");
if (!iomap) idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
if (!idxd->reg_base)
return -ENOMEM; return -ENOMEM;
dev_dbg(dev, "Set DMA masks\n"); dev_dbg(dev, "Set DMA masks\n");
...@@ -360,11 +412,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -360,11 +412,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc) if (rc)
return rc; return rc;
dev_dbg(dev, "Alloc IDXD context\n");
idxd = idxd_alloc(pdev, iomap);
if (!idxd)
return -ENOMEM;
idxd_set_type(idxd);
dev_dbg(dev, "Set PCI master\n");
@@ -452,6 +499,8 @@ static void idxd_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
mutex_lock(&idxd_idr_lock);
idr_remove(&idxd_idrs[idxd->type], idxd->id);
mutex_unlock(&idxd_idr_lock);
@@ -470,7 +519,7 @@ static int __init idxd_init_module(void)
int err, i;
/*
-* If the CPU does not support write512, there's no point in
+* If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We can not utilize it.
*/
if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
@@ -478,8 +527,10 @@
return -ENODEV;
}
-pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
-DRV_NAME, IDXD_DRIVER_VERSION);
+if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+pr_warn("Platform does not have ENQCMD(S) support.\n");
+else
+support_enqcmd = true;
mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)
......
@@ -336,6 +336,8 @@ union wqcfg {
u32 bits[8];
} __packed;
#define WQCFG_PASID_IDX 2
/*
* This macro calculates the offset into the WQCFG register
* idxd - struct idxd *
......
@@ -11,11 +11,22 @@
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
struct idxd_desc *desc;
struct idxd_device *idxd = wq->idxd;
desc = wq->descs[idx];
memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
memset(desc->completion, 0, sizeof(struct dsa_completion_record));
desc->cpu = cpu;
if (device_pasid_enabled(idxd))
desc->hw->pasid = idxd->pasid;
/*
* Descriptor completion vectors are 1-8 for MSIX. We will round
* robin through the 8 vectors.
*/
wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
desc->hw->int_handle = wq->vec_ptr;
return desc;
}
@@ -70,18 +81,32 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
struct idxd_device *idxd = wq->idxd;
int vec = desc->hw->int_handle;
void __iomem *portal;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
-portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
+portal = wq->portal + idxd_get_wq_portal_offset(IDXD_PORTAL_LIMITED);
/*
-* The wmb() flushes writes to coherent DMA data before possibly
-* triggering a DMA read. The wmb() is necessary even on UP because
-* the recipient is a device.
+* The wmb() flushes writes to coherent DMA data before
+* possibly triggering a DMA read. The wmb() is necessary
+* even on UP because the recipient is a device.
*/
wmb();
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
/*
* It's not likely that we would receive queue full rejection
* since the descriptor allocation gates at wq size. If we
* receive a -EAGAIN, that means something went wrong such as the
* device is not accepting descriptor at all.
*/
rc = enqcmds(portal, desc->hw);
if (rc < 0)
return rc;
}
/*
* Pending the descriptor to the lockless list for the irq_entry
......
@@ -175,6 +175,30 @@ static int idxd_config_bus_probe(struct device *dev)
return -EINVAL;
}
/* Shared WQ checks */
if (wq_shared(wq)) {
if (!device_swq_supported(idxd)) {
dev_warn(dev,
"PASID not enabled and shared WQ.\n");
mutex_unlock(&wq->wq_lock);
return -ENXIO;
}
/*
* Shared wq with the threshold set to 0 means the user
* did not set the threshold or transitioned from a
* dedicated wq but did not set threshold. A value
* of 0 would effectively disable the shared wq. The
* driver does not allow a value of 0 to be set for
* threshold via sysfs.
*/
if (wq->threshold == 0) {
dev_warn(dev,
"Shared WQ and threshold 0.\n");
mutex_unlock(&wq->wq_lock);
return -EINVAL;
}
}
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
mutex_unlock(&wq->wq_lock);
@@ -875,6 +899,8 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0;
} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else {
return -EINVAL;
}
@@ -973,6 +999,87 @@ static ssize_t wq_priority_store(struct device *dev,
static struct device_attribute dev_attr_wq_priority =
__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
static ssize_t wq_block_on_fault_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
return sprintf(buf, "%u\n",
test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
static ssize_t wq_block_on_fault_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
struct idxd_device *idxd = wq->idxd;
bool bof;
int rc;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -ENXIO;
rc = kstrtobool(buf, &bof);
if (rc < 0)
return rc;
if (bof)
set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
else
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
return count;
}
static struct device_attribute dev_attr_wq_block_on_fault =
__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
wq_block_on_fault_store);
static ssize_t wq_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
return sprintf(buf, "%u\n", wq->threshold);
}
static ssize_t wq_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
struct idxd_device *idxd = wq->idxd;
unsigned int val;
int rc;
rc = kstrtouint(buf, 0, &val);
if (rc < 0)
return -EINVAL;
if (val > wq->size || val <= 0)
return -EINVAL;
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
if (wq->state != IDXD_WQ_DISABLED)
return -ENXIO;
if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
return -EINVAL;
wq->threshold = val;
return count;
}
static struct device_attribute dev_attr_wq_threshold =
__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1044,6 +1151,13 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
/*
* This is temporarily placed here until we have SVM support for
* dmaengine.
*/
if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
return -EOPNOTSUPP;
memset(wq->name, 0, WQ_NAME_SIZE + 1);
strncpy(wq->name, buf, WQ_NAME_SIZE);
strreplace(wq->name, '\n', '\0');
@@ -1154,6 +1268,8 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_mode.attr,
&dev_attr_wq_size.attr,
&dev_attr_wq_priority.attr,
&dev_attr_wq_block_on_fault.attr,
&dev_attr_wq_threshold.attr,
&dev_attr_wq_type.attr,
&dev_attr_wq_name.attr,
&dev_attr_wq_cdev_minor.attr,
@@ -1305,6 +1421,16 @@ static ssize_t clients_show(struct device *dev,
}
static DEVICE_ATTR_RO(clients);
static ssize_t pasid_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1424,6 +1550,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_gen_cap.attr,
&dev_attr_configurable.attr,
&dev_attr_clients.attr,
&dev_attr_pasid_enabled.attr,
&dev_attr_state.attr,
&dev_attr_errors.attr,
&dev_attr_max_tokens.attr,
......