Commit 43d1c6a6 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-fix-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes, notably:

   - uninit variable fix for dw-axi-dmac driver

   - return value check for dw-edma driver

   - calling wq quiesce inside spinlock and missed completion for idxd
     driver

   - mod alias fix for st_fdma driver"

* tag 'dmaengine-fix-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: st_fdma: fix MODULE_ALIAS
  dmaengine: idxd: fix missed completion on abort path
  dmaengine: ti: k3-udma: Fix smatch warnings
  dmaengine: idxd: fix calling wq quiesce inside spinlock
  dmaengine: dw-edma: Fix return value check for dma_set_mask_and_coherent()
  dmaengine: dw-axi-dmac: Fix uninitialized variable in axi_chan_block_xfer_start()
parents 4f549bf3 822c9f2b
...@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, ...@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first) struct axi_dma_desc *first)
{ {
u32 priority = chan->chip->dw->hdata->priority[chan->id]; u32 priority = chan->chip->dw->hdata->priority[chan->id];
struct axi_dma_chan_config config; struct axi_dma_chan_config config = {};
u32 irq_mask; u32 irq_mask;
u8 lms = 0; /* Select AXI0 master for LLI fetching */ u8 lms = 0; /* Select AXI0 master for LLI fetching */
...@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, ...@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority; config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW; config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
switch (chan->direction) { switch (chan->direction) {
case DMA_MEM_TO_DEV: case DMA_MEM_TO_DEV:
dw_axi_dma_set_byte_halfword(chan, true); dw_axi_dma_set_byte_halfword(chan, true);
......
...@@ -187,18 +187,10 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev, ...@@ -187,18 +187,10 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* DMA configuration */ /* DMA configuration */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
pci_err(pdev, "DMA mask 64 set failed\n");
return err;
} else {
pci_err(pdev, "DMA mask 64 set failed\n");
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) { if (err) {
pci_err(pdev, "DMA mask 32 set failed\n"); pci_err(pdev, "DMA mask 64 set failed\n");
return err; return err;
} }
}
/* Data structure allocation */ /* Data structure allocation */
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
......
...@@ -137,10 +137,10 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) ...@@ -137,10 +137,10 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit); INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work); queue_work(idxd->wq, &idxd->work);
} else { } else {
spin_lock(&idxd->dev_lock);
idxd->state = IDXD_DEV_HALTED; idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd); idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd); idxd_wqs_unmap_portal(idxd);
spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd); idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev, dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n", "idxd halted, need %s.\n",
......
...@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, ...@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{ {
struct idxd_desc *d, *t, *found = NULL; struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head; struct llist_node *head;
LIST_HEAD(flist);
desc->completion->status = IDXD_COMP_DESC_ABORT; desc->completion->status = IDXD_COMP_DESC_ABORT;
/* /*
...@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, ...@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
found = desc; found = desc;
continue; continue;
} }
list_add_tail(&desc->list, &ie->work_list);
if (d->completion->status)
list_add_tail(&d->list, &flist);
else
list_add_tail(&d->list, &ie->work_list);
} }
} }
...@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, ...@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (found) if (found)
complete_desc(found, IDXD_COMPLETE_ABORT); complete_desc(found, IDXD_COMPLETE_ABORT);
/*
* complete_desc() will return desc to allocator and the desc can be
* acquired by a different process and the desc->list can be modified.
* Delete desc from list so the list trasversing does not get corrupted
* by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
complete_desc(d, IDXD_COMPLETE_NORMAL);
}
} }
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
......
...@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2"); ...@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>"); MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform: " DRIVER_NAME); MODULE_ALIAS("platform:" DRIVER_NAME);
...@@ -4534,35 +4534,49 @@ static int udma_setup_resources(struct udma_dev *ud) ...@@ -4534,35 +4534,49 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt); bitmap_zero(ud->tchan_map, ud->tchan_cnt);
irq_res.sets = 1;
} else { } else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt); bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map, udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], "tchan"); &rm_res->desc[i], "tchan");
}
irq_res.sets = rm_res->sets; irq_res.sets = rm_res->sets;
}
/* rchan and matching default flow ranges */ /* rchan and matching default flow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt); bitmap_zero(ud->rchan_map, ud->rchan_cnt);
irq_res.sets++;
} else { } else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt); bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map, udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], "rchan"); &rm_res->desc[i], "rchan");
irq_res.sets += rm_res->sets;
} }
irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = 0;
irq_res.desc[0].num = ud->tchan_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) { for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start; irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num; irq_res.desc[i].num = rm_res->desc[i].num;
irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
} }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = 0;
irq_res.desc[i].num = ud->rchan_cnt;
} else {
for (j = 0; j < rm_res->sets; j++, i++) { for (j = 0; j < rm_res->sets; j++, i++) {
if (rm_res->desc[j].num) { if (rm_res->desc[j].num) {
irq_res.desc[i].start = rm_res->desc[j].start + irq_res.desc[i].start = rm_res->desc[j].start +
...@@ -4575,6 +4589,7 @@ static int udma_setup_resources(struct udma_dev *ud) ...@@ -4575,6 +4589,7 @@ static int udma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
} }
} }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc); kfree(irq_res.desc);
if (ret) { if (ret) {
...@@ -4690,57 +4705,75 @@ static int bcdma_setup_resources(struct udma_dev *ud) ...@@ -4690,57 +4705,75 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
bitmap_zero(ud->bchan_map, ud->bchan_cnt); bitmap_zero(ud->bchan_map, ud->bchan_cnt);
irq_res.sets++;
} else { } else {
bitmap_fill(ud->bchan_map, ud->bchan_cnt); bitmap_fill(ud->bchan_map, ud->bchan_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->bchan_map, udma_mark_resource_ranges(ud, ud->bchan_map,
&rm_res->desc[i], &rm_res->desc[i],
"bchan"); "bchan");
}
irq_res.sets += rm_res->sets; irq_res.sets += rm_res->sets;
} }
}
/* tchan ranges */ /* tchan ranges */
if (ud->tchan_cnt) { if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt); bitmap_zero(ud->tchan_map, ud->tchan_cnt);
irq_res.sets += 2;
} else { } else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt); bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map, udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], &rm_res->desc[i],
"tchan"); "tchan");
}
irq_res.sets += rm_res->sets * 2; irq_res.sets += rm_res->sets * 2;
} }
}
/* rchan ranges */ /* rchan ranges */
if (ud->rchan_cnt) { if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt); bitmap_zero(ud->rchan_map, ud->rchan_cnt);
irq_res.sets += 2;
} else { } else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt); bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map, udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], &rm_res->desc[i],
"rchan"); "rchan");
}
irq_res.sets += rm_res->sets * 2; irq_res.sets += rm_res->sets * 2;
} }
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
if (ud->bchan_cnt) { if (ud->bchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = oes->bcdma_bchan_ring;
irq_res.desc[0].num = ud->bchan_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) { for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start + irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring; oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num; irq_res.desc[i].num = rm_res->desc[i].num;
} }
} }
}
if (ud->tchan_cnt) { if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->bcdma_tchan_data;
irq_res.desc[i].num = ud->tchan_cnt;
irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = ud->tchan_cnt;
i += 2;
} else {
for (j = 0; j < rm_res->sets; j++, i += 2) { for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start + irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_tchan_data; oes->bcdma_tchan_data;
...@@ -4751,8 +4784,16 @@ static int bcdma_setup_resources(struct udma_dev *ud) ...@@ -4751,8 +4784,16 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].num = rm_res->desc[j].num; irq_res.desc[i + 1].num = rm_res->desc[j].num;
} }
} }
}
if (ud->rchan_cnt) { if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->bcdma_rchan_data;
irq_res.desc[i].num = ud->rchan_cnt;
irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = ud->rchan_cnt;
i += 2;
} else {
for (j = 0; j < rm_res->sets; j++, i += 2) { for (j = 0; j < rm_res->sets; j++, i += 2) {
irq_res.desc[i].start = rm_res->desc[j].start + irq_res.desc[i].start = rm_res->desc[j].start +
oes->bcdma_rchan_data; oes->bcdma_rchan_data;
...@@ -4763,6 +4804,7 @@ static int bcdma_setup_resources(struct udma_dev *ud) ...@@ -4763,6 +4804,7 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].num = rm_res->desc[j].num; irq_res.desc[i + 1].num = rm_res->desc[j].num;
} }
} }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc); kfree(irq_res.desc);
...@@ -4858,40 +4900,55 @@ static int pktdma_setup_resources(struct udma_dev *ud) ...@@ -4858,40 +4900,55 @@ static int pktdma_setup_resources(struct udma_dev *ud)
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
/* all rflows are assigned exclusively to Linux */ /* all rflows are assigned exclusively to Linux */
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
irq_res.sets = 1;
} else { } else {
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rflow_in_use, udma_mark_resource_ranges(ud, ud->rflow_in_use,
&rm_res->desc[i], "rflow"); &rm_res->desc[i], "rflow");
}
irq_res.sets = rm_res->sets; irq_res.sets = rm_res->sets;
}
/* tflow ranges */ /* tflow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) { if (IS_ERR(rm_res)) {
/* all tflows are assigned exclusively to Linux */ /* all tflows are assigned exclusively to Linux */
bitmap_zero(ud->tflow_map, ud->tflow_cnt); bitmap_zero(ud->tflow_map, ud->tflow_cnt);
irq_res.sets++;
} else { } else {
bitmap_fill(ud->tflow_map, ud->tflow_cnt); bitmap_fill(ud->tflow_map, ud->tflow_cnt);
for (i = 0; i < rm_res->sets; i++) for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tflow_map, udma_mark_resource_ranges(ud, ud->tflow_map,
&rm_res->desc[i], "tflow"); &rm_res->desc[i], "tflow");
}
irq_res.sets += rm_res->sets; irq_res.sets += rm_res->sets;
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
if (!irq_res.desc)
return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) {
irq_res.desc[0].start = oes->pktdma_tchan_flow;
irq_res.desc[0].num = ud->tflow_cnt;
i = 1;
} else {
for (i = 0; i < rm_res->sets; i++) { for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start + irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow; oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num; irq_res.desc[i].num = rm_res->desc[i].num;
} }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
if (IS_ERR(rm_res)) {
irq_res.desc[i].start = oes->pktdma_rchan_flow;
irq_res.desc[i].num = ud->rflow_cnt;
} else {
for (j = 0; j < rm_res->sets; j++, i++) { for (j = 0; j < rm_res->sets; j++, i++) {
irq_res.desc[i].start = rm_res->desc[j].start + irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow; oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num; irq_res.desc[i].num = rm_res->desc[j].num;
} }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc); kfree(irq_res.desc);
if (ret) { if (ret) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment