Commit 08210094 authored by Dave Jiang, committed by Vinod Koul

dmaengine: fix channel index enumeration

When the channel registration code was changed to allow hotplug operations,
dynamic indexing wasn't taken into account. When channels are plugged and
unplugged out of order, the serial indexing breaks. Convert channel indexing
to IDA tracking in order to allow dynamic assignment. The previous code
causes no regression for existing channel allocation outside of the idxd
driver, since idxd is the only user of the hotplug case at this point.

With this change, chan->idr_ref is no longer needed. Because hotplug can
leave a device with no channels registered, the channel device release code
should no longer attempt to free the dma device id on the last channel
release.

Fixes: e81274cd ("dmaengine: add support to dynamic register/unregister of channels")
Reported-by: Yixin Zhang <yixin.zhang@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Yixin Zhang <yixin.zhang@intel.com>
Link: https://lore.kernel.org/r/158679961260.7674.8485924270472851852.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 0c894463
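
For context on the failure mode: the old code derived a new channel's index from device->chancnt, which is decremented on unregister, so unplugging a channel out of order lets an index that is still in use be handed out again (and the duplicate "dma%dchan%d" sysfs name then collides). The short userspace simulation below contrasts that serial scheme with the lowest-free-ID behavior the kernel IDA provides. It is an illustrative sketch only; MAX_CHANS, lowest_free_id() and free_id() are hypothetical stand-ins, not kernel APIs.

/*
 * Userspace simulation (not kernel code) of the bug and the fix:
 * serial indexing reuses a live index after an out-of-order unplug,
 * lowest-free-ID allocation (what the kernel IDA does) does not.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CHANS 8

static bool in_use[MAX_CHANS];		/* stand-in for a struct ida */

static int lowest_free_id(void)		/* ~ ida_alloc() */
{
	for (int i = 0; i < MAX_CHANS; i++)
		if (!in_use[i]) {
			in_use[i] = true;
			return i;
		}
	return -1;			/* ~ -ENOSPC */
}

static void free_id(int id)		/* ~ ida_free() */
{
	in_use[id] = false;
}

int main(void)
{
	/* Serial indexing: chan_id = chancnt++, chancnt-- on unplug. */
	int chancnt = 0;
	int a = chancnt++;		/* A -> 0 */
	int b = chancnt++;		/* B -> 1 */
	chancnt--;			/* unplug A out of order */
	int c = chancnt++;		/* C -> 1: collides with live B */
	printf("serial: B=%d C=%d%s\n", b, c,
	       b == c ? " (duplicate index, duplicate sysfs name)" : "");

	/* IDA-style indexing: an unplugged index returns to the pool. */
	a = lowest_free_id();		/* A -> 0 */
	b = lowest_free_id();		/* B -> 1 */
	free_id(a);			/* unplug A */
	c = lowest_free_id();		/* C -> 0: reuses A's freed index */
	printf("ida:    B=%d C=%d (unique)\n", b, c);
	return 0;
}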
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
 	struct dma_chan_dev *chan_dev;
 
 	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		ida_free(&dma_ida, chan_dev->dev_id);
-		kfree(chan_dev->idr_ref);
-	}
 	kfree(chan_dev);
 }
 
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-					       struct dma_chan *chan,
-					       int chan_id)
+					       struct dma_chan *chan)
 {
 	int rc = 0;
-	int chancnt = device->chancnt;
-	atomic_t *idr_ref;
-	struct dma_chan *tchan;
-
-	tchan = list_first_entry_or_null(&device->channels,
-					 struct dma_chan, device_node);
-	if (!tchan)
-		return -ENODEV;
-
-	if (tchan->dev) {
-		idr_ref = tchan->dev->idr_ref;
-	} else {
-		idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-		if (!idr_ref)
-			return -ENOMEM;
-		atomic_set(idr_ref, 0);
-	}
 
 	chan->local = alloc_percpu(typeof(*chan->local));
 	if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	 * When the chan_id is a negative value, we are dynamically adding
 	 * the channel. Otherwise we are static enumerating.
 	 */
-	chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+	mutex_lock(&device->chan_mutex);
+	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+	mutex_unlock(&device->chan_mutex);
+	if (chan->chan_id < 0) {
+		pr_err("%s: unable to alloc ida for chan: %d\n",
+		       __func__, chan->chan_id);
+		goto err_out;
+	}
+
 	chan->dev->device.class = &dma_devclass;
 	chan->dev->device.parent = device->dev;
 	chan->dev->chan = chan;
-	chan->dev->idr_ref = idr_ref;
 	chan->dev->dev_id = device->dev_id;
-	atomic_inc(idr_ref);
 	dev_set_name(&chan->dev->device, "dma%dchan%d",
 		     device->dev_id, chan->chan_id);
 
 	rc = device_register(&chan->dev->device);
 	if (rc)
-		goto err_out;
+		goto err_out_ida;
 	chan->client_count = 0;
-	device->chancnt = chan->chan_id + 1;
+	device->chancnt++;
 
 	return 0;
 
+ err_out_ida:
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
 err_out:
 	free_percpu(chan->local);
 	kfree(chan->dev);
-	if (atomic_dec_return(idr_ref) == 0)
-		kfree(idr_ref);
 	return rc;
 }
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
 	int rc;
 
-	rc = __dma_async_device_channel_register(device, chan, -1);
+	rc = __dma_async_device_channel_register(device, chan);
 	if (rc < 0)
 		return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
 	device_unregister(&chan->dev->device);
 	free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-	int rc, i = 0;
+	int rc;
 	struct dma_chan* chan;
 
 	if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
+	mutex_init(&device->chan_mutex);
+	ida_init(&device->chan_ida);
+
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = __dma_async_device_channel_register(device, chan, i++);
+		rc = __dma_async_device_channel_register(device, chan);
 		if (rc < 0)
 			goto err_out;
 	}
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
 	 */
 	dma_cap_set(DMA_PRIVATE, device->cap_mask);
 	dma_channel_rebalance();
+	ida_free(&dma_ida, device->dev_id);
 	dma_device_put(device);
 	mutex_unlock(&dma_list_mutex);
 }
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
 	struct device device;
 	int dev_id;
-	atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;
 	struct module *owner;
+	struct ida chan_ida;
+	struct mutex chan_mutex; /* to protect chan_ida */
 
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
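
Condensed from the hunks above, the per-device pattern the patch adopts is sketched below. ida_alloc() and ida_free() are the real IDA APIs (ida_alloc() hands back the lowest free ID, or a negative errno); the example_* identifiers are hypothetical stand-ins for the new device->chan_ida and device->chan_mutex fields, not code from the patch.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDA(example_chan_ida);		/* stand-in for device->chan_ida */
static DEFINE_MUTEX(example_chan_mutex);	/* stand-in for device->chan_mutex */

/* Allocate the lowest free channel index, or return a negative errno. */
static int example_chan_id_get(void)
{
	int id;

	mutex_lock(&example_chan_mutex);
	id = ida_alloc(&example_chan_ida, GFP_KERNEL);
	mutex_unlock(&example_chan_mutex);
	return id;
}

/* Return an index to the pool so the next hotplug add can reuse it. */
static void example_chan_id_put(int id)
{
	mutex_lock(&example_chan_mutex);
	ida_free(&example_chan_ida, id);
	mutex_unlock(&example_chan_mutex);
}

ida_alloc() and ida_free() perform their own internal locking; the mutex here mirrors the patch's chan_mutex, which serializes index allocation and release against the surrounding registration bookkeeping.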