Commit c6ac7188 authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.6' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Late fixes in dmaengine for v5.6:

   - move .device_release missing log warning to debug

   - couple of maintainer entries for HiSilicon and IADX drivers

   - off-by-one fix for idxd driver

   - documentation warning fixes

   - TI k3 dma error handling fix"

* tag 'dmaengine-fix-5.6' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: ti: k3-udma-glue: Fix an error handling path in 'k3_udma_glue_cfg_rx_flow()'
  MAINTAINERS: Add maintainer for HiSilicon DMA engine driver
  dmaengine: idxd: fix off by one on cdev dwq refcount
  MAINTAINERS: rectify the INTEL IADX DRIVER entry
  dmaengine: move .device_release missing log warning to debug level
  docs: dmaengine: provider.rst: get rid of some warnings
parents 979e52ca 018af9be
@@ -266,11 +266,15 @@ to use.
 attached (via the dmaengine_desc_attach_metadata() helper to the descriptor.
 
 From the DMA driver the following is expected for this mode:
   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
     The data from the provided metadata buffer should be prepared for the DMA
     controller to be sent alongside of the payload data. Either by copying to a
     hardware descriptor, or highly coupled packet.
   - DMA_DEV_TO_MEM
     On transfer completion the DMA driver must copy the metadata to the client
     provided metadata buffer before notifying the client about the completion.
     After the transfer completion, DMA drivers must not touch the metadata
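For reference, a client using the DESC_METADATA_CLIENT mode described in this hunk roughly follows the sketch below. It is only an illustration: the channel, the DMA-mapped buffer and the metadata buffer are assumed to exist already, and the function name is invented here.

#include <linux/dmaengine.h>

/* Illustrative DESC_METADATA_CLIENT user; not part of this patch set. */
static int xfer_with_metadata(struct dma_chan *chan, dma_addr_t buf,
                              size_t len, void *md_buf, size_t md_len)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;
        int ret;

        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        /*
         * Hand the client-owned metadata buffer to the provider: for
         * DMA_MEM_TO_DEV its contents are sent along with the payload,
         * for DMA_DEV_TO_MEM the provider fills it in on completion.
         */
        ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
        if (ret)
                return ret;

        cookie = dmaengine_submit(desc);
        ret = dma_submit_error(cookie);
        if (ret)
                return ret;

        dma_async_issue_pending(chan);
        return 0;
}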
@@ -284,10 +288,14 @@ to use.
 and dmaengine_desc_set_metadata_len() is provided as helper functions.
 
 From the DMA driver the following is expected for this mode:
 
-  - get_metadata_ptr
+  - get_metadata_ptr()
     Should return a pointer for the metadata buffer, the maximum size of the
     metadata buffer and the currently used / valid (if any) bytes in the buffer.
-  - set_metadata_len
+  - set_metadata_len()
     It is called by the clients after it have placed the metadata to the buffer
     to let the DMA driver know the number of valid bytes provided.
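On the provider side, the get_metadata_ptr()/set_metadata_len() callbacks named above correspond to the get_ptr and set_len members of struct dma_descriptor_metadata_ops. A minimal sketch of this DESC_METADATA_ENGINE side, with the foo naming and the 128-byte metadata region invented purely for illustration:

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical driver-private descriptor wrapping the generic one. */
struct foo_desc {
        struct dma_async_tx_descriptor txd;
        u8 metadata[128];       /* region the hardware reads/writes */
        size_t metadata_len;    /* valid bytes currently in the region */
};

/* Hand the client a pointer into the descriptor-owned metadata region. */
static void *foo_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
                                  size_t *payload_len, size_t *max_len)
{
        struct foo_desc *fd = container_of(desc, struct foo_desc, txd);

        *max_len = sizeof(fd->metadata);
        *payload_len = fd->metadata_len;  /* e.g. bytes valid after DEV_TO_MEM */
        return fd->metadata;
}

/* Client reports how many bytes it placed in the region (MEM_TO_DEV). */
static int foo_set_metadata_len(struct dma_async_tx_descriptor *desc,
                                size_t payload_len)
{
        struct foo_desc *fd = container_of(desc, struct foo_desc, txd);

        if (payload_len > sizeof(fd->metadata))
                return -EINVAL;
        fd->metadata_len = payload_len;
        return 0;
}

static struct dma_descriptor_metadata_ops foo_metadata_ops = {
        .get_ptr = foo_get_metadata_ptr,
        .set_len = foo_set_metadata_len,
};

A prep_* callback would then point desc->metadata_ops at foo_metadata_ops, and the device would advertise DESC_METADATA_ENGINE in its desc_metadata_modes.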
@@ -7516,6 +7516,12 @@ F: include/uapi/linux/if_hippi.h
 F: net/802/hippi.c
 F: drivers/net/hippi/
 
+HISILICON DMA DRIVER
+M: Zhou Wang <wangzhou1@hisilicon.com>
+L: dmaengine@vger.kernel.org
+S: Maintained
+F: drivers/dma/hisi_dma.c
+
 HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
 M: Zaibo Xu <xuzaibo@huawei.com>
 L: linux-crypto@vger.kernel.org
@@ -8476,7 +8482,6 @@ L: dmaengine@vger.kernel.org
 S: Supported
 F: drivers/dma/idxd/*
 F: include/uapi/linux/idxd.h
-F: include/linux/idxd.h
 
 INTEL IDLE DRIVER
 M: Jacob Pan <jacob.jun.pan@linux.intel.com>
@@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device)
     }
 
     if (!device->device_release)
-        dev_warn(device->dev,
+        dev_dbg(device->dev,
             "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
 
     kref_init(&device->ref);
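For context on the message being demoted: it fires when a provider calls dma_async_device_register() without setting the dma_device .device_release callback, which is what makes it safe to unbind the driver while clients still hold channels. A rough sketch of a provider supplying one, with the foo_dma naming invented here:

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical provider state embedding the generic dma_device. */
struct foo_dma_dev {
        struct dma_device ddev;
        /* register mappings, descriptor pools, ... */
};

/*
 * Runs once the last reference to the dma_device is dropped, so the
 * backing allocation can be freed even if the driver was unbound while
 * channels were still in use.
 */
static void foo_dma_release(struct dma_device *ddev)
{
        kfree(container_of(ddev, struct foo_dma_dev, ddev));
}

static int foo_dma_register(struct foo_dma_dev *fdd)
{
        fdd->ddev.device_release = foo_dma_release;
        /* ... capabilities and the other mandatory callbacks ... */
        return dma_async_device_register(&fdd->ddev);
}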
@@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
     dev = &idxd->pdev->dev;
     idxd_cdev = &wq->idxd_cdev;
 
-    dev_dbg(dev, "%s called\n", __func__);
+    dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
 
-    if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+    if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
         return -EBUSY;
 
     ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
     if (IS_ERR(flow->udma_rflow)) {
         ret = PTR_ERR(flow->udma_rflow);
         dev_err(dev, "UDMAX rflow get err %d\n", ret);
-        goto err;
+        return ret;
     }
 
     if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
-        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
-        return -ENODEV;
+        ret = -ENODEV;
+        goto err_rflow_put;
     }
 
     /* request and cfg rings */
@@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
     if (!flow->ringrx) {
         ret = -ENODEV;
         dev_err(dev, "Failed to get RX ring\n");
-        goto err;
+        goto err_rflow_put;
     }
 
     flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
@@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
     if (!flow->ringrxfdq) {
         ret = -ENODEV;
         dev_err(dev, "Failed to get RXFDQ ring\n");
-        goto err;
+        goto err_ringrx_free;
     }
 
     ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
     if (ret) {
         dev_err(dev, "Failed to cfg ringrx %d\n", ret);
-        goto err;
+        goto err_ringrxfdq_free;
     }
 
     ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
     if (ret) {
         dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
-        goto err;
+        goto err_ringrxfdq_free;
     }
 
     if (rx_chn->remote) {
@@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
     if (ret) {
         dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
             ret);
-        goto err;
+        goto err_ringrxfdq_free;
     }
 
     rx_chn->flows_ready++;
@@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
         flow->udma_rflow_id, rx_chn->flows_ready);
 
     return 0;
 
-err:
-    k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+err_ringrxfdq_free:
+    k3_ringacc_ring_free(flow->ringrxfdq);
+err_ringrx_free:
+    k3_ringacc_ring_free(flow->ringrx);
+err_rflow_put:
+    xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+    flow->udma_rflow = NULL;
     return ret;
 }
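The reworked error path above is the usual staged-unwind idiom: instead of one catch-all err: label that ran the full k3_udma_glue_release_rx_flow() teardown, each label now releases only what has actually been acquired, in reverse order. A generic sketch of the pattern; everything in it (ctx, the acquire/release stubs) is a placeholder rather than driver code:

/* Staged goto-unwind: each label undoes one acquisition, in reverse order. */
struct ctx { int flow, ring, fdq; };

static int acquire_flow(struct ctx *c) { c->flow = 1; return 0; }
static int acquire_ring(struct ctx *c) { c->ring = 1; return 0; }
static int acquire_fdq(struct ctx *c)  { c->fdq = 1;  return 0; }
static void release_ring(struct ctx *c) { c->ring = 0; }
static void release_flow(struct ctx *c) { c->flow = 0; }

static int setup(struct ctx *c)
{
        int ret;

        ret = acquire_flow(c);
        if (ret)
                return ret;             /* nothing acquired yet */

        ret = acquire_ring(c);
        if (ret)
                goto err_flow_put;      /* undo only the flow */

        ret = acquire_fdq(c);
        if (ret)
                goto err_ring_free;     /* undo the ring, then the flow */

        return 0;

err_ring_free:
        release_ring(c);
err_flow_put:
        release_flow(c);
        return ret;
}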