Commit 7e09a979 authored by Mark Brown

regmap: Cache async work structures

Rather than allocating and deallocating the structures used to manage async
transfers each time we do one, keep them around for as long as the regmap
itself is around. This should provide a small performance improvement.
Signed-off-by: Mark Brown <broonie@linaro.org>
parent d0e639c9
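The idea, in isolation: instead of kzalloc()/kfree() on every async transfer, regmap now parks finished regmap_async descriptors on a new async_free list protected by async_lock, pops one off that list before falling back to allocation, and only frees everything when the regmap itself is torn down in regmap_exit(). Below is a minimal user-space sketch of the same free-list pattern; the async_pool/async_entry names and the pthread locking are stand-ins invented for this illustration, not regmap API.

/*
 * Illustrative sketch of the caching pattern this commit introduces.
 * A finished descriptor is parked on a free list and reused by the
 * next transfer instead of being freed and reallocated.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct async_entry {
        struct async_entry *next;
        void *work_buf;                 /* per-transfer scratch buffer */
};

struct async_pool {
        pthread_mutex_t lock;           /* plays the role of async_lock */
        struct async_entry *free_list;  /* plays the role of async_free */
        size_t buf_size;
};

/* Take a cached entry if one exists, otherwise allocate a fresh one. */
static struct async_entry *async_get(struct async_pool *pool)
{
        struct async_entry *e;

        pthread_mutex_lock(&pool->lock);
        e = pool->free_list;
        if (e)
                pool->free_list = e->next;
        pthread_mutex_unlock(&pool->lock);

        if (e)
                return e;

        e = calloc(1, sizeof(*e));
        if (!e)
                return NULL;
        e->work_buf = calloc(1, pool->buf_size);
        if (!e->work_buf) {
                free(e);
                return NULL;
        }
        return e;
}

/* On completion, park the entry for reuse instead of freeing it. */
static void async_put(struct async_pool *pool, struct async_entry *e)
{
        pthread_mutex_lock(&pool->lock);
        e->next = pool->free_list;
        pool->free_list = e;
        pthread_mutex_unlock(&pool->lock);
}

/* Everything is finally released when the pool itself goes away. */
static void async_pool_exit(struct async_pool *pool)
{
        struct async_entry *e;

        while ((e = pool->free_list)) {
                pool->free_list = e->next;
                free(e->work_buf);
                free(e);
        }
}

int main(void)
{
        struct async_pool pool = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .buf_size = 32,
        };
        struct async_entry *a = async_get(&pool);       /* allocates */
        if (!a)
                return 1;
        async_put(&pool, a);
        struct async_entry *b = async_get(&pool);       /* reuses a */
        printf("reused: %s\n", a == b ? "yes" : "no");
        async_put(&pool, b);
        async_pool_exit(&pool);
        return 0;
}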
@@ -44,7 +44,6 @@ struct regmap_format {
 struct regmap_async {
         struct list_head list;
-        struct work_struct cleanup;
         struct regmap *map;
         void *work_buf;
 };
@@ -67,6 +66,7 @@ struct regmap {
         spinlock_t async_lock;
         wait_queue_head_t async_waitq;
         struct list_head async_list;
+        struct list_head async_free;
         int async_ret;
 #ifdef CONFIG_DEBUG_FS
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                  unsigned int val);
 
-static void async_cleanup(struct work_struct *work)
-{
-        struct regmap_async *async = container_of(work, struct regmap_async,
-                                                   cleanup);
-
-        kfree(async->work_buf);
-        kfree(async);
-}
-
 bool regmap_reg_in_ranges(unsigned int reg,
                           const struct regmap_range *ranges,
                           unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
         spin_lock_init(&map->async_lock);
         INIT_LIST_HEAD(&map->async_list);
+        INIT_LIST_HEAD(&map->async_free);
         init_waitqueue_head(&map->async_waitq);
 
         if (config->read_flag_mask || config->write_flag_mask) {
@@ -942,12 +934,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+        struct regmap_async *async;
+
         regcache_exit(map);
         regmap_debugfs_exit(map);
         regmap_range_exit(map);
         if (map->bus && map->bus->free_context)
                 map->bus->free_context(map->bus_context);
         kfree(map->work_buf);
+        while (!list_empty(&map->async_free)) {
+                async = list_first_entry_or_null(&map->async_free,
+                                                 struct regmap_async,
+                                                 list);
+                list_del(&async->list);
+                kfree(async->work_buf);
+                kfree(async);
+        }
         kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1115,20 +1117,31 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
         u8[0] |= map->write_flag_mask;
 
         if (async && map->bus->async_write) {
-                struct regmap_async *async = map->bus->async_alloc();
-                if (!async)
-                        return -ENOMEM;
+                struct regmap_async *async;
 
                 trace_regmap_async_write_start(map->dev, reg, val_len);
 
-                async->work_buf = kzalloc(map->format.buf_size,
-                                          GFP_KERNEL | GFP_DMA);
-                if (!async->work_buf) {
-                        kfree(async);
-                        return -ENOMEM;
+                spin_lock_irqsave(&map->async_lock, flags);
+                async = list_first_entry_or_null(&map->async_free,
+                                                 struct regmap_async,
+                                                 list);
+                if (async)
+                        list_del(&async->list);
+                spin_unlock_irqrestore(&map->async_lock, flags);
+
+                if (!async) {
+                        async = map->bus->async_alloc();
+                        if (!async)
+                                return -ENOMEM;
+
+                        async->work_buf = kzalloc(map->format.buf_size,
+                                                  GFP_KERNEL | GFP_DMA);
+                        if (!async->work_buf) {
+                                kfree(async);
+                                return -ENOMEM;
+                        }
                 }
 
-                INIT_WORK(&async->cleanup, async_cleanup);
                 async->map = map;
 
                 /* If the caller supplied the value we can use it safely. */
@@ -1152,11 +1165,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                                 ret);
 
                         spin_lock_irqsave(&map->async_lock, flags);
-                        list_del(&async->list);
+                        list_move(&async->list, &map->async_free);
                         spin_unlock_irqrestore(&map->async_lock, flags);
-
-                        kfree(async->work_buf);
-                        kfree(async);
                 }
 
                 return ret;
@@ -1820,8 +1830,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
         trace_regmap_async_io_complete(map->dev);
 
         spin_lock(&map->async_lock);
-        list_del(&async->list);
+        list_move(&async->list, &map->async_free);
         wake = list_empty(&map->async_list);
 
         if (ret != 0)
@@ -1829,8 +1838,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
         spin_unlock(&map->async_lock);
 
-        schedule_work(&async->cleanup);
-
         if (wake)
                 wake_up(&map->async_waitq);
 }