Commit 714b5b4c authored by Nuno Sa's avatar Nuno Sa Committed by Jonathan Cameron

iio: buffer: move to the cleanup.h magic

Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240229-iio-use-cleanup-magic-v3-3-c3d34889ae3c@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent 095be2d5
......@@ -10,6 +10,7 @@
* - Alternative access techniques?
*/
#include <linux/anon_inodes.h>
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
......@@ -533,28 +534,26 @@ static ssize_t iio_scan_el_store(struct device *dev,
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
guard(mutex)(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer))
return -EBUSY;
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret)
goto error_ret;
} else if (state && !ret) {
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
return ret;
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
if (state && ret)
return len;
return ret < 0 ? ret : len;
if (state)
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
else
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret)
return ret;
return len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
......@@ -581,16 +580,13 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
guard(mutex)(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer))
return -EBUSY;
buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
return len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
......@@ -674,21 +670,16 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
if (val == buffer->length)
return len;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
buffer->access->set_length(buffer, val);
ret = 0;
}
if (ret)
goto out;
guard(mutex)(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer))
return -EBUSY;
buffer->access->set_length(buffer, val);
if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
return len;
}
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
......@@ -1268,7 +1259,6 @@ int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
if (insert_buffer == remove_buffer)
return 0;
......@@ -1277,8 +1267,8 @@ int iio_update_buffers(struct iio_dev *indio_dev,
insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock);
mutex_lock(&iio_dev_opaque->mlock);
guard(mutex)(&iio_dev_opaque->info_exist_lock);
guard(mutex)(&iio_dev_opaque->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL;
......@@ -1286,23 +1276,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
if (remove_buffer && !iio_buffer_is_active(remove_buffer))
remove_buffer = NULL;
if (!insert_buffer && !remove_buffer) {
ret = 0;
goto out_unlock;
}
if (!indio_dev->info) {
ret = -ENODEV;
goto out_unlock;
}
ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
if (!insert_buffer && !remove_buffer)
return 0;
out_unlock:
mutex_unlock(&iio_dev_opaque->mlock);
mutex_unlock(&iio_dev_opaque->info_exist_lock);
if (!indio_dev->info)
return -ENODEV;
return ret;
return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
......@@ -1326,22 +1306,22 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
mutex_lock(&iio_dev_opaque->mlock);
guard(mutex)(&iio_dev_opaque->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
return len;
if (requested_state)
ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
ret = __iio_update_buffers(indio_dev, NULL, buffer);
if (ret)
return ret;
done:
mutex_unlock(&iio_dev_opaque->mlock);
return (ret < 0) ? ret : len;
return len;
}
static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
......@@ -1368,23 +1348,17 @@ static ssize_t watermark_store(struct device *dev,
if (!val)
return -EINVAL;
mutex_lock(&iio_dev_opaque->mlock);
guard(mutex)(&iio_dev_opaque->mlock);
if (val > buffer->length) {
ret = -EINVAL;
goto out;
}
if (val > buffer->length)
return -EINVAL;
if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto out;
}
if (iio_buffer_is_active(buffer))
return -EBUSY;
buffer->watermark = val;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len;
return len;
}
static ssize_t data_available_show(struct device *dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment