Commit e81274cd authored by Dave Jiang, committed by Vinod Koul

dmaengine: add support to dynamic register/unregister of channels

With the channel registration routines broken out, now add support code to
allow independent registering and unregistering of channels in a hotplug fashion.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/157965023364.73301.7821862091077299040.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent d2fb0a04
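
As a usage illustration (not part of this patch), here is a minimal driver-side sketch of how the two new exports might be called when a hardware queue is hot-added or hot-removed after dma_async_device_register(). The wrapper names (my_hotplug_add_channel(), struct my_dma_chan) and the explicit list_add_tail() onto device->channels are assumptions made for the sketch, mirroring the list_del() the unregister path performs below.

/* Hypothetical usage sketch -- not taken from this patch. */
#include <linux/dmaengine.h>
#include <linux/list.h>

struct my_dma_chan {
	struct dma_chan chan;	/* embedded dmaengine channel */
	/* driver-private per-queue state would go here */
};

/* Hot-add: expose a newly discovered queue while the dma_device stays live. */
static int my_hotplug_add_channel(struct dma_device *dma, struct my_dma_chan *mc)
{
	mc->chan.device = dma;
	/*
	 * Assumption: the caller links the channel into device->channels,
	 * matching the list_del() done in the unregister path of this patch.
	 */
	list_add_tail(&mc->chan.device_node, &dma->channels);

	return dma_async_device_channel_register(dma, &mc->chan);
}

/* Hot-remove: tear down just this channel; the dma_device stays registered. */
static void my_hotplug_del_channel(struct dma_device *dma, struct my_dma_chan *mc)
{
	dma_async_device_channel_unregister(dma, &mc->chan);
}
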
@@ -986,6 +986,20 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	return rc;
 }
 
+int dma_async_device_channel_register(struct dma_device *device,
+				      struct dma_chan *chan)
+{
+	int rc;
+
+	rc = __dma_async_device_channel_register(device, chan, -1);
+	if (rc < 0)
+		return rc;
+
+	dma_channel_rebalance();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
 static void __dma_async_device_channel_unregister(struct dma_device *device,
 						  struct dma_chan *chan)
 {
@@ -993,12 +1007,22 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 		  "%s called while %d clients hold a reference\n",
 		  __func__, chan->client_count);
 	mutex_lock(&dma_list_mutex);
+	list_del(&chan->device_node);
+	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
 	device_unregister(&chan->dev->device);
 	free_percpu(chan->local);
 }
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+					 struct dma_chan *chan)
+{
+	__dma_async_device_channel_unregister(device, chan);
+	dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
 
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -1121,12 +1145,6 @@ int dma_async_device_register(struct dma_device *device)
 			goto err_out;
 	}
 
-	if (!device->chancnt) {
-		dev_err(device->dev, "%s: device has no channels!\n", __func__);
-		rc = -ENODEV;
-		goto err_out;
-	}
-
 	mutex_lock(&dma_list_mutex);
 	/* take references on public channels */
 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1181,9 +1199,9 @@ EXPORT_SYMBOL(dma_async_device_register);
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
-	struct dma_chan *chan;
+	struct dma_chan *chan, *n;
 
-	list_for_each_entry(chan, &device->channels, device_node)
+	list_for_each_entry_safe(chan, n, &device->channels, device_node)
 		__dma_async_device_channel_unregister(device, chan);
 
 	mutex_lock(&dma_list_mutex);
......
@@ -1521,6 +1521,10 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 int dma_async_device_register(struct dma_device *device);
 int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+				      struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+					 struct dma_chan *chan);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 #define dma_request_channel(mask, x, y) \
 	__dma_request_channel(&(mask), x, y, NULL)
......