Commit d57dd2d2 authored by Linus Torvalds

Merge tag 'dmaengine-fix2-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - dw-edma fixes to improve driver and remote HDMA setup

 - fsl-edma and fsl-qdma fixes for SoC hang, irq init, byte calculations,
   and sparse warnings

 - idxd: safe user copy of completion record fix

 - ptdma: consistent DMA mask fix

* tag 'dmaengine-fix2-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: ptdma: use consistent DMA masks
  dmaengine: fsl-qdma: add __iomem and struct in union to fix sparse warning
  dmaengine: idxd: Ensure safe user copy of completion record
  dmaengine: fsl-edma: correct max_segment_size setting
  dmaengine: idxd: Remove shadow Event Log head stored in idxd
  dmaengine: fsl-edma: correct calculation of 'nbytes' in multi-fifo scenario
  dmaengine: fsl-qdma: init irq after reg initialization
  dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read
  dmaengine: dw-edma: eDMA: Add sync read before starting the DMA transfer in remote setup
  dmaengine: dw-edma: HDMA: Add sync read before starting the DMA transfer in remote setup
  dmaengine: dw-edma: Add HDMA remote interrupt configuration
  dmaengine: dw-edma: HDMA_V0_REMOTEL_STOP_INT_EN typo fix
  dmaengine: dw-edma: Fix wrong interrupt bit set for HDMA
  dmaengine: dw-edma: Fix the ch_count hdma callback
parents e4f79000 df2515a1
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
 	struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
 			  upper_32_bits(chunk->ll_region.paddr));
 	}
+
+	dw_edma_v0_sync_ll_data(chunk);
+
 	/* Doorbell */
 	SET_RW_32(dw, chan->dir, doorbell,
 		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
...
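The dummy read above is the standard flush for posted PCIe writes: an MRd to a region cannot complete until the earlier MWr TLPs to that region have been pushed out, so reading back any linked-list word guarantees the LL data is visible before the doorbell fires. A minimal sketch of the idiom in isolation (hypothetical helper name, not the driver's API):

	/* Sketch: order posted MMIO writes ahead of a doorbell write. */
	#include <linux/io.h>

	static inline void flush_posted_writes(void __iomem *region)
	{
		/*
		 * The returned value is irrelevant; the read transaction
		 * itself forces completion of the preceding writes.
		 */
		(void)readl(region);
	}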
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
 
 static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
 {
-	u32 num_ch = 0;
-	int id;
-
-	for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
-		if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
-			num_ch++;
-	}
-
-	if (num_ch > HDMA_V0_MAX_NR_CH)
-		num_ch = HDMA_V0_MAX_NR_CH;
-
-	return (u16)num_ch;
+	/*
+	 * The HDMA IP have no way to know the number of hardware channels
+	 * available, we set it to maximum channels and let the platform
+	 * set the right number of channels.
+	 */
+	return HDMA_V0_MAX_NR_CH;
 }
 
 static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }
 
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
 	struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	/* Interrupt enable&unmask - done, abort */
 	tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
 	      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
-	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+	if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
 	SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
 
 	/* Channel control */
 	SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	/* Set consumer cycle */
 	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
 		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+	dw_hdma_v0_sync_ll_data(chunk);
+
 	/* Doorbell */
 	SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
 }
...
@@ -15,7 +15,7 @@
 #define HDMA_V0_LOCAL_ABORT_INT_EN		BIT(6)
 #define HDMA_V0_REMOTE_ABORT_INT_EN		BIT(5)
 #define HDMA_V0_LOCAL_STOP_INT_EN		BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN		BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN		BIT(3)
 #define HDMA_V0_ABORT_INT_MASK			BIT(2)
 #define HDMA_V0_STOP_INT_MASK			BIT(0)
 #define HDMA_V0_LINKLIST_EN			BIT(0)
...
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 	if (fsl_chan->is_multi_fifo) {
 		/* set mloff to support multiple fifo */
 		burst = cfg->direction == DMA_DEV_TO_MEM ?
-				cfg->src_addr_width : cfg->dst_addr_width;
+				cfg->src_maxburst : cfg->dst_maxburst;
 		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
 		/* enable DMLOE/SMLOE */
 		if (cfg->direction == DMA_MEM_TO_DEV) {
...
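The minor-loop offset must be derived from the burst length (words per request), not from the bus address width: with a multi-FIFO peripheral the engine has to rewind by one full burst, 4 bytes per word, after each minor loop. A worked example of the difference (stand-alone C, hypothetical values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addr_width = 4;	/* DMA_SLAVE_BUSWIDTH_4_BYTES */
		uint32_t maxburst = 8;		/* words moved per request */

		/* old, wrong: offset fixed at -16 regardless of burst */
		printf("mloff(old) = %d\n", -(int)(addr_width * 4));
		/* new, correct: rewind one full burst, -32 here */
		printf("mloff(new) = %d\n", -(int)(maxburst * 4));
		return 0;
	}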
@@ -30,8 +30,9 @@
 #define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
 #define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)
 
-#define EDMA_TCD_CITER_CITER(x)		((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x)		((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK		GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x)		((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x)		((x) & EDMA_TCD_ITER_MASK)
 
 #define EDMA_TCD_CSR_START		BIT(0)
 #define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
...
@@ -10,6 +10,7 @@
  */
 
 #include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
 			DMAENGINE_ALIGN_32_BYTES;
 
 	/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
-	dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
 	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
...
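FIELD_GET(m, m) extracts the field covered by mask m out of m itself, which is the field's all-ones maximum, so the segment limit now tracks the CITER definition automatically. It also fixes the old constant: GENMASK(14, 0) is 15 bits, 0x7fff, not 0x3fff. A stand-alone check, with the two kernel macros re-derived for illustration:

	#include <stdio.h>

	#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))

	int main(void)
	{
		unsigned int mask = GENMASK(14, 0);	/* EDMA_TCD_ITER_MASK */

		/* prints 0x7fff: the 15-bit CITER maximum */
		printf("max_seg_size = %#x\n", FIELD_GET(mask, mask));
		return 0;
	}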
@@ -109,6 +109,7 @@
 #define FSL_QDMA_CMD_WTHROTL_OFFSET	20
 #define FSL_QDMA_CMD_DSEN_OFFSET	19
 #define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_PF			BIT(17)
 
 /* Field definition for Descriptor status */
 #define QDMA_CCDF_STATUS_RTE		BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
 			u8 __reserved1[2];
 			u8 cfg8b_w1;
 		} __packed;
+		struct {
+			__le32 __reserved2;
+			__le32 cmd;
+		} __packed;
 		__le64 data;
 	};
 } __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
-	u32 cmd;
 	struct fsl_qdma_format *sdf, *ddf;
 	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
 
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
 	/* Descriptor Buffer */
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	sdf->data = QDMA_SDDF_CMD(cmd);
-
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-	ddf->data = QDMA_SDDF_CMD(cmd);
+	sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       FSL_QDMA_CMD_PF);
+
+	ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
 }
 
 /*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block,
+				 __iomem void *block,
 				 int id)
 {
 	bool duplicate;
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 	if (!fsl_qdma->queue)
 		return -ENOMEM;
 
-	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-	if (ret)
-		return ret;
-
 	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
 	if (fsl_qdma->irq_base < 0)
 		return fsl_qdma->irq_base;
@@ -1238,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, fsl_qdma);
 
-	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"Can't register NXP Layerscape qDMA engine.\n");
+		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
 		return ret;
 	}
 
-	ret = fsl_qdma_reg_init(fsl_qdma);
+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
 	if (ret) {
-		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
 		return ret;
 	}
...
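The new anonymous struct gives the driver a typed view of the upper half of the descriptor's 64-bit data word, so the command can be stored through the __le32 cmd member (keeping sparse happy) instead of being shifted into a u64 by the old QDMA_SDDF_CMD() macro. A reduced, host-endian model of the overlay (hypothetical stand-alone code, not the driver's struct):

	#include <stdint.h>
	#include <stdio.h>

	/* Reduced model of the union inside struct fsl_qdma_format. */
	union qdma_word {
		struct {
			uint32_t reserved;	/* low 32 bits of data */
			uint32_t cmd;		/* high 32 bits of data */
		};
		uint64_t data;
	};

	int main(void)
	{
		union qdma_word w = { .data = 0 };

		w.cmd = 0x1u << 28;	/* e.g. an RWTTYPE-style field */
		/* little-endian host: prints 0x1000000000000000 */
		printf("data = %#llx\n", (unsigned long long)w.data);
		return 0;
	}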
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
 	spin_lock(&evl->lock);
 	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = status.tail;
-	h = evl->head;
+	h = status.head;
 	size = evl->size;
 
 	while (h != t) {
...
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
 	spin_lock(&evl->lock);
-	h = evl->head;
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
+	h = evl_status.head;
 	evl_size = evl->size;
 
 	seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
...
@@ -300,7 +300,6 @@ struct idxd_evl {
 	unsigned int log_size;
 	/* The number of entries in the event log. */
 	u16 size;
-	u16 head;
 	unsigned long *bmap;
 	bool batch_fail[IDXD_MAX_BATCH_IDENT];
 };
...
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
 static int idxd_init_evl(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
+	unsigned int evl_cache_size;
 	struct idxd_evl *evl;
+	const char *idxd_name;
 
 	if (idxd->hw.gen_cap.evl_support == 0)
 		return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
 	spin_lock_init(&evl->lock);
 	evl->size = IDXD_EVL_SIZE_MIN;
 
-	idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
-					    sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
-					    0, 0, NULL);
+	idxd_name = dev_name(idxd_confdev(idxd));
+	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+	/*
+	 * Since completion record in evl_cache will be copied to user
+	 * when handling completion record page fault, need to create
+	 * the cache suitable for user copy.
+	 */
+	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+						     0, 0, 0, evl_cache_size,
+						     NULL);
 	if (!idxd->evl_cache) {
 		kfree(evl);
 		return -ENOMEM;
...
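With CONFIG_HARDENED_USERCOPY, copy_to_user() from slab memory is rejected unless the cache declares a whitelisted window; kmem_cache_create_usercopy() is the kmem_cache_create() variant that takes that (useroffset, usersize) pair, here covering the whole object. Its prototype, for reference:

	struct kmem_cache *
	kmem_cache_create_usercopy(const char *name, unsigned int size,
				   unsigned int align, slab_flags_t flags,
				   unsigned int useroffset, unsigned int usersize,
				   void (*ctor)(void *));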
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
 	/* Clear interrupt pending bit */
 	iowrite32(evl_status.bits_upper32,
 		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
-	h = evl->head;
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
+	h = evl_status.head;
 	size = idxd->evl->size;
 
 	while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
 		h = (h + 1) % size;
 	}
 
-	evl->head = h;
 	evl_status.head = h;
 	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	spin_unlock(&evl->lock);
...
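Dropping the software shadow means every reader derives head and tail from one 64-bit EVLSTATUS read, so the pair is always self-consistent and cannot drift from what the hardware actually did. A reduced model of that snapshot pattern (field layout abbreviated, not the exact register definition):

	#include <stdint.h>
	#include <stdio.h>

	/* Abbreviated stand-in for the EVLSTATUS register view. */
	union evl_status {
		struct {
			uint64_t head:16;
			uint64_t rsvd0:16;
			uint64_t tail:16;
			uint64_t rsvd1:16;
		};
		uint64_t bits;
	};

	static void evl_snapshot(const volatile uint64_t *reg,
				 uint16_t *head, uint16_t *tail)
	{
		/* one wide read: head and tail come from the same instant */
		union evl_status s = { .bits = *reg };

		*head = s.head;
		*tail = s.tail;
	}

	int main(void)
	{
		volatile uint64_t fake_reg = (0x002aULL << 32) | 0x0015ULL;
		uint16_t h, t;

		evl_snapshot(&fake_reg, &h, &t);
		printf("head=%u tail=%u\n", h, t);	/* head=21 tail=42 */
		return 0;
	}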
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 	chan->vc.desc_free = pt_do_cleanup;
 	vchan_init(&chan->vc, dma_dev);
 
-	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
 	ret = dma_async_device_register(dma_dev);
 	if (ret)
 		goto err_reg;
...
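The deleted line forced a 64-bit DMA mask at dmaengine registration, overriding whatever mask the device's probe path had already negotiated; with it gone, the mask is configured in exactly one place. A minimal sketch of the intended pattern (hypothetical probe function and a 48-bit capability assumed for illustration):

	#include <linux/dma-mapping.h>

	static int example_probe(struct device *dev)
	{
		/*
		 * Negotiate the mask once, from the hardware's real
		 * addressing capability; later registration paths must
		 * not override it.
		 */
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	}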