Commit ec0d6423 authored by Dave Jiang, committed by Vinod Koul

dmaengine: idxd: embed irq_entry in idxd_wq struct

With irq_entry already associated with the wq in a 1:1 relationship,
embed the irq_entry in the idxd_wq struct and remove the back pointers to
idxd_wq and idxd_device. As part of this work, clean up the interrupt
handle assignment so that no decision has to be made during the submit
call about where the interrupt handle value comes from; set the interrupt
handle at irq request initialization time instead.

irq_entry 0 is designated as special and is tied to the device itself.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163942148362.2412839.12055447853311267866.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 26e9baa8
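
A minimal sketch of the lookup convention this patch settles on, mirroring the idxd_get_ie()/ie_to_wq()/ie_to_idxd() helpers added in the idxd.h hunk below (the struct layouts here are simplified and only illustrative): MSI-X entry 0 maps to the irq_entry embedded in the device itself, and entry N (N >= 1) maps to the irq_entry embedded in wq N - 1, so the back pointers and the separately allocated irq_entries array are no longer needed.

	/* Illustrative only: simplified struct layouts; the real definitions
	 * live in the driver's idxd.h (see the hunk below). */
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct idxd_irq_entry { int id; int vector; int int_handle; };
	struct idxd_wq { int id; struct idxd_irq_entry ie; /* embedded, 1:1 */ };
	struct idxd_device {
		struct idxd_irq_entry ie;	/* misc irq, MSI-X vector 0 */
		struct idxd_wq **wqs;		/* wq N owns MSI-X vector N + 1 */
	};

	/* vector 0 -> device misc entry, vector N (N >= 1) -> wqs[N - 1]->ie */
	static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
	{
		return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
	}

	/* recover the owning wq without the removed ie->wq back pointer */
	static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
	{
		return container_of(ie, struct idxd_wq, ie);
	}

Because the entries are embedded, the kcalloc_node() of idxd->irq_entries and the matching kfree() in the release path go away, as the hunks below show.
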
@@ -21,8 +21,11 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
struct idxd_irq_entry *ie;
struct irq_data *data;
ie = idxd_get_ie(idxd, vec_id);
data = irq_get_irq_data(ie->vector);
pci_msi_mask_irq(data);
}
@@ -38,8 +41,11 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
struct idxd_irq_entry *ie;
struct irq_data *data;
ie = idxd_get_ie(idxd, vec_id);
data = irq_get_irq_data(ie->vector);
pci_msi_unmask_irq(data);
}
@@ -1216,13 +1222,6 @@ int __drv_enable_wq(struct idxd_wq *wq)
goto err;
}
/*
* Device has 1 misc interrupt and N interrupts for descriptor completion. To
* assign WQ to interrupt, we will take the N+1 interrupt since vector 0 is
* for the misc interrupt.
*/
wq->ie = &idxd->irq_entries[wq->id + 1];
rc = idxd_wq_enable(wq);
if (rc < 0) {
dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
@@ -1273,7 +1272,6 @@ void __drv_disable_wq(struct idxd_wq *wq)
idxd_wq_drain(wq);
idxd_wq_reset(wq);
wq->ie = NULL;
wq->client_count = 0;
}
@@ -70,7 +70,6 @@ extern struct idxd_device_driver idxd_user_drv;
#define INVALID_INT_HANDLE -1
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
int vector;
struct llist_head pending_llist;
@@ -81,7 +80,6 @@ struct idxd_irq_entry {
*/
spinlock_t list_lock;
int int_handle;
struct idxd_wq *wq;
ioasid_t pasid;
};
@@ -185,7 +183,7 @@ struct idxd_wq {
struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
struct idxd_irq_entry *ie;
struct idxd_irq_entry ie;
enum idxd_wq_type type;
struct idxd_group *group;
int client_count;
@@ -266,6 +264,7 @@ struct idxd_device {
int id;
int major;
u32 cmd_status;
struct idxd_irq_entry ie; /* misc irq, msix 0 */
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -302,8 +301,6 @@ struct idxd_device {
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;
struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
@@ -395,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
idev->type = type;
}
static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{
return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
}
static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{
return container_of(ie, struct idxd_wq, ie);
}
static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{
return container_of(ie, struct idxd_device, ie);
}
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
@@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
struct idxd_irq_entry *irq_entry;
struct idxd_irq_entry *ie;
int i, msixcnt;
int rc = 0;
@@ -90,72 +90,54 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
/*
* We implement 1 completion list per MSI-X entry except for
* entry 0, which is for errors and others.
*/
idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
GFP_KERNEL, dev_to_node(dev));
if (!idxd->irq_entries) {
rc = -ENOMEM;
goto err_irq_entries;
}
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
/*
* Association of WQ should be assigned starting with irq_entry 1.
* irq_entry 0 is for misc interrupts and has no wq association
*/
if (i > 0)
idxd->irq_entries[i].wq = idxd->wqs[i - 1];
idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
idxd->irq_entries[i].int_handle = INVALID_INT_HANDLE;
if (device_pasid_enabled(idxd) && i > 0)
idxd->irq_entries[i].pasid = idxd->pasid;
else
idxd->irq_entries[i].pasid = INVALID_IOASID;
spin_lock_init(&idxd->irq_entries[i].list_lock);
}
idxd_msix_perm_setup(idxd);
irq_entry = &idxd->irq_entries[0];
rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
0, "idxd-misc", irq_entry);
ie = idxd_get_ie(idxd, 0);
ie->vector = pci_irq_vector(pdev, 0);
rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
goto err_misc_irq;
}
dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", ie->vector);
/* first MSI-X entry is not for wq interrupts */
idxd->num_wq_irqs = msixcnt - 1;
for (i = 0; i < idxd->max_wqs; i++) {
int msix_idx = i + 1;
ie = idxd_get_ie(idxd, msix_idx);
for (i = 1; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
/* MSIX vector 0 special, wq irq entry starts at 1 */
ie->id = msix_idx;
ie->vector = pci_irq_vector(pdev, msix_idx);
ie->int_handle = INVALID_INT_HANDLE;
if (device_pasid_enabled(idxd) && i > 0)
ie->pasid = idxd->pasid;
else
ie->pasid = INVALID_IOASID;
spin_lock_init(&ie->list_lock);
init_llist_head(&ie->pending_llist);
INIT_LIST_HEAD(&ie->work_list);
init_llist_head(&idxd->irq_entries[i].pending_llist);
INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
rc = request_threaded_irq(irq_entry->vector, NULL,
idxd_wq_thread, 0, "idxd-portal", irq_entry);
rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
if (rc < 0) {
dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
dev_err(dev, "Failed to allocate irq %d.\n", ie->vector);
goto err_wq_irqs;
}
dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, ie->vector);
if (idxd->request_int_handles) {
rc = idxd_device_request_int_handle(idxd, i, &irq_entry->int_handle,
rc = idxd_device_request_int_handle(idxd, i, &ie->int_handle,
IDXD_IRQ_MSIX);
if (rc < 0) {
free_irq(irq_entry->vector, irq_entry);
free_irq(ie->vector, ie);
goto err_wq_irqs;
}
dev_dbg(dev, "int handle requested: %u\n", irq_entry->int_handle);
dev_dbg(dev, "int handle requested: %u\n", ie->int_handle);
} else {
ie->int_handle = msix_idx;
}
}
idxd_unmask_error_interrupts(idxd);
@@ -163,23 +145,19 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
err_wq_irqs:
while (--i >= 0) {
irq_entry = &idxd->irq_entries[i];
free_irq(irq_entry->vector, irq_entry);
if (irq_entry->int_handle != INVALID_INT_HANDLE) {
idxd_device_release_int_handle(idxd, irq_entry->int_handle,
IDXD_IRQ_MSIX);
irq_entry->int_handle = INVALID_INT_HANDLE;
irq_entry->pasid = INVALID_IOASID;
ie = &idxd->wqs[i]->ie;
free_irq(ie->vector, ie);
if (ie->int_handle != INVALID_INT_HANDLE) {
idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
ie->int_handle = INVALID_INT_HANDLE;
ie->pasid = INVALID_IOASID;
}
irq_entry->vector = -1;
irq_entry->wq = NULL;
irq_entry->idxd = NULL;
ie->vector = -1;
}
err_misc_irq:
/* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
idxd_msix_perm_clear(idxd);
err_irq_entries:
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
@@ -188,21 +166,18 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct idxd_irq_entry *irq_entry;
struct idxd_irq_entry *ie;
int i;
for (i = 0; i < idxd->irq_cnt; i++) {
irq_entry = &idxd->irq_entries[i];
if (irq_entry->int_handle != INVALID_INT_HANDLE) {
idxd_device_release_int_handle(idxd, irq_entry->int_handle,
IDXD_IRQ_MSIX);
irq_entry->int_handle = INVALID_INT_HANDLE;
irq_entry->pasid = INVALID_IOASID;
ie = idxd_get_ie(idxd, i);
if (ie->int_handle != INVALID_INT_HANDLE) {
idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
ie->int_handle = INVALID_INT_HANDLE;
ie->pasid = INVALID_IOASID;
}
irq_entry->vector = -1;
irq_entry->wq = NULL;
irq_entry->idxd = NULL;
free_irq(irq_entry->vector, irq_entry);
free_irq(ie->vector, ie);
ie->vector = -1;
}
idxd_mask_error_interrupts(idxd);
@@ -755,7 +730,7 @@ static void idxd_release_int_handles(struct idxd_device *idxd)
int i, rc;
for (i = 1; i < idxd->irq_cnt; i++) {
struct idxd_irq_entry *ie = &idxd->irq_entries[i];
struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
if (ie->int_handle != INVALID_INT_HANDLE) {
rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
@@ -783,7 +758,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
idxd_mask_error_interrupts(idxd);
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
irq_entry = idxd_get_ie(idxd, i);
synchronize_irq(irq_entry->vector);
if (i == 0)
continue;
@@ -815,7 +790,7 @@ static void idxd_remove(struct pci_dev *pdev)
idxd_disable_system_pasid(idxd);
for (i = 0; i < msixcnt; i++) {
irq_entry = &idxd->irq_entries[i];
irq_entry = idxd_get_ie(idxd, i);
free_irq(irq_entry->vector, irq_entry);
}
idxd_msix_perm_clear(idxd);
@@ -73,8 +73,8 @@ static void idxd_device_reinit(struct work_struct *work)
*/
static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
{
struct idxd_wq *wq = ie->wq;
struct idxd_device *idxd = ie->idxd;
struct idxd_wq *wq = ie_to_wq(ie);
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
struct dsa_hw_desc desc = {};
void __iomem *portal;
@@ -155,8 +155,8 @@ static void idxd_int_handle_revoke(struct work_struct *work)
* at the end to make sure all invalid int handle descriptors are processed.
*/
for (i = 1; i < idxd->irq_cnt; i++) {
struct idxd_irq_entry *ie = &idxd->irq_entries[i];
struct idxd_wq *wq = ie->wq;
struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
struct idxd_wq *wq = ie_to_wq(ie);
rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
if (rc < 0) {
@@ -338,7 +338,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = irq_entry->idxd;
struct idxd_device *idxd = ie_to_idxd(irq_entry);
int rc;
u32 cause;
@@ -193,12 +193,8 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* that we designated the descriptor to.
*/
if (desc_flags & IDXD_OP_FLAG_RCI) {
ie = wq->ie;
if (ie->int_handle == INVALID_INT_HANDLE)
desc->hw->int_handle = ie->id;
else
desc->hw->int_handle = ie->int_handle;
ie = &wq->ie;
desc->hw->int_handle = ie->int_handle;
llist_add(&desc->llnode, &ie->pending_llist);
}
@@ -1304,7 +1304,6 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd->groups);
kfree(idxd->wqs);
kfree(idxd->engines);
kfree(idxd->irq_entries);
ida_free(&idxd_ida, idxd->id);
kfree(idxd);
}