Commit 7b053842 authored by Linus Torvalds

Merge tag 'regmap-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
 "In user visible terms just a couple of enhancements here, though there
  was a moderate amount of refactoring required in order to support the
  register cache sync performance improvements.

   - Support for block and asynchronous I/O during register cache
     syncing; this provides a use case dependent performance
     improvement.
   - Additional debugfs information on the memory consumption and
     register set"

* tag 'regmap-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap: (23 commits)
  regmap: don't corrupt work buffer in _regmap_raw_write()
  regmap: cache: Fix format specifier in dev_dbg
  regmap: cache: Make regcache_sync_block_raw static
  regmap: cache: Write consecutive registers in a single block write
  regmap: cache: Split raw and non-raw syncs
  regmap: cache: Factor out block sync
  regmap: cache: Factor out reg_present support from rbtree cache
  regmap: cache: Use raw I/O to sync rbtrees if we can
  regmap: core: Provide regmap_can_raw_write() operation
  regmap: cache: Provide a get address of value operation
  regmap: Cut down on the average # of nodes in the rbtree cache
  regmap: core: Make raw write available to regcache
  regmap: core: Warn on invalid operation combinations
  regmap: irq: Clarify error message when we fail to request primary IRQ
  regmap: rbtree: Expose total memory consumption in the rbtree debugfs entry
  regmap: debugfs: Add a registers `range' file
  regmap: debugfs: Simplify calculation of `c->max_reg'
  regmap: cache: Store caches in native register format where possible
  regmap: core: Split out in place value parsing
  regmap: cache: Use regcache_get_value() to check if we updated
  ...
parents 5415ba99 38a81796
@@ -38,7 +38,8 @@ struct regmap_format {
 			     unsigned int reg, unsigned int val);
 	void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
 	void (*format_val)(void *buf, unsigned int val, unsigned int shift);
-	unsigned int (*parse_val)(void *buf);
+	unsigned int (*parse_val)(const void *buf);
+	void (*parse_inplace)(void *buf);
 };
 struct regmap_async {
@@ -76,6 +77,7 @@ struct regmap {
 	unsigned int debugfs_tot_len;
 	struct list_head debugfs_off_cache;
+	struct mutex cache_lock;
 #endif
 	unsigned int max_register;
@@ -125,6 +127,9 @@ struct regmap {
 	void *cache;
 	u32 cache_dirty;
+	unsigned long *cache_present;
+	unsigned int cache_present_nbits;
 	struct reg_default *patch;
 	int patch_regs;
@@ -187,12 +192,35 @@ int regcache_read(struct regmap *map,
 int regcache_write(struct regmap *map,
 		   unsigned int reg, unsigned int value);
 int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned int block_base, unsigned int start,
+			unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+						const void *base,
+						unsigned int idx)
+{
+	return base + (map->cache_word_size * idx);
+}
+
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-			      unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
-		      unsigned int val, unsigned int word_size);
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_set_reg_present(struct regmap *map, unsigned int reg);
+
+static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
+{
+	if (!map->cache_present)
+		return true;
+	if (reg > map->cache_present_nbits)
+		return false;
+	return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
+}
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len, bool async);
+
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
...
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
 	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
 	if (ret >= 0)
 		/* fetch the value from the cache */
-		*value = regcache_get_val(lzo_block->dst, blkpos,
-					  map->cache_word_size);
+		*value = regcache_get_val(map, lzo_block->dst, blkpos);
 	kfree(lzo_block->dst);
 	/* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
 	}
 	/* write the new value to the cache */
-	if (regcache_set_val(lzo_block->dst, blkpos, value,
-			     map->cache_word_size)) {
+	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
 		kfree(lzo_block->dst);
 		goto out;
 	}
...
@@ -47,22 +47,21 @@ static inline void regcache_rbtree_get_base_top_reg(
 	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
 }
-static unsigned int regcache_rbtree_get_register(
-	struct regcache_rbtree_node *rbnode, unsigned int idx,
-	unsigned int word_size)
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+	struct regcache_rbtree_node *rbnode, unsigned int idx)
 {
-	return regcache_get_val(rbnode->block, idx, word_size);
+	return regcache_get_val(map, rbnode->block, idx);
 }
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
-					 unsigned int idx, unsigned int val,
-					 unsigned int word_size)
+static void regcache_rbtree_set_register(struct regmap *map,
+					 struct regcache_rbtree_node *rbnode,
+					 unsigned int idx, unsigned int val)
 {
-	regcache_set_val(rbnode->block, idx, val, word_size);
+	regcache_set_val(map, rbnode->block, idx, val);
 }
 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
 							    unsigned int reg)
 {
 	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
 	struct rb_node *node;
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	struct regcache_rbtree_node *n;
 	struct rb_node *node;
 	unsigned int base, top;
+	size_t mem_size;
 	int nodes = 0;
 	int registers = 0;
 	int this_registers, average;
 	map->lock(map);
+	mem_size = sizeof(*rbtree_ctx);
+	mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
 		n = container_of(node, struct regcache_rbtree_node, node);
+		mem_size += sizeof(*n);
+		mem_size += (n->blklen * map->cache_word_size);
 		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
 		this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	else
 		average = 0;
-	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-		   nodes, registers, average);
+	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+		   nodes, registers, average, mem_size);
 	map->unlock(map);
@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
-						      map->cache_word_size);
+		if (!regcache_reg_present(map, reg))
+			return -ENOENT;
+		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
 	} else {
 		return -ENOENT;
 	}
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
 }
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+					   struct regcache_rbtree_node *rbnode,
 					   unsigned int pos, unsigned int reg,
-					   unsigned int value, unsigned int word_size)
+					   unsigned int value)
 {
 	u8 *blk;
 	blk = krealloc(rbnode->block,
-		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+		       (rbnode->blklen + 1) * map->cache_word_size,
+		       GFP_KERNEL);
 	if (!blk)
 		return -ENOMEM;
 	/* insert the register value in the correct place in the rbnode block */
-	memmove(blk + (pos + 1) * word_size,
-		blk + pos * word_size,
-		(rbnode->blklen - pos) * word_size);
+	memmove(blk + (pos + 1) * map->cache_word_size,
+		blk + pos * map->cache_word_size,
+		(rbnode->blklen - pos) * map->cache_word_size);
 	/* update the rbnode block, its size and the base register */
 	rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
 	if (!pos)
 		rbnode->base_reg = reg;
-	regcache_rbtree_set_register(rbnode, pos, value, word_size);
+	regcache_rbtree_set_register(map, rbnode, pos, value);
 	return 0;
 }
@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 	struct regcache_rbtree_ctx *rbtree_ctx;
 	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
 	struct rb_node *node;
-	unsigned int val;
 	unsigned int reg_tmp;
 	unsigned int pos;
 	int i;
 	int ret;
 	rbtree_ctx = map->cache;
+	/* update the reg_present bitmap, make space if necessary */
+	ret = regcache_set_reg_present(map, reg);
+	if (ret < 0)
+		return ret;
 	/* if we can't locate it in the cached rbnode we'll have
 	 * to traverse the rbtree looking for it.
 	 */
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
 		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-		val = regcache_rbtree_get_register(rbnode, reg_tmp,
-						   map->cache_word_size);
-		if (val == value)
-			return 0;
-		regcache_rbtree_set_register(rbnode, reg_tmp, value,
-					     map->cache_word_size);
+		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
 	} else {
 		/* look for an adjacent register to the one we are about to add */
 		for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 				pos = i + 1;
 			else
 				pos = i;
-			ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
-							      reg, value,
-							      map->cache_word_size);
+			ret = regcache_rbtree_insert_to_block(map,
+							      rbnode_tmp,
+							      pos, reg,
+							      value);
 			if (ret)
 				return ret;
 			rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
 		if (!rbnode)
 			return -ENOMEM;
-		rbnode->blklen = 1;
+		rbnode->blklen = sizeof(*rbnode);
 		rbnode->base_reg = reg;
 		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
 					GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 			kfree(rbnode);
 			return -ENOMEM;
 		}
-		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+		regcache_rbtree_set_register(map, rbnode, 0, value);
 		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
 		rbtree_ctx->cached_rbnode = rbnode;
 	}
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 	struct regcache_rbtree_ctx *rbtree_ctx;
 	struct rb_node *node;
 	struct regcache_rbtree_node *rbnode;
-	unsigned int regtmp;
-	unsigned int val;
 	int ret;
-	int i, base, end;
+	int base, end;
 	rbtree_ctx = map->cache;
 	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -402,27 +408,13 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 		else
 			end = rbnode->blklen;
-		for (i = base; i < end; i++) {
-			regtmp = rbnode->base_reg + (i * map->reg_stride);
-			val = regcache_rbtree_get_register(rbnode, i,
-							   map->cache_word_size);
-			/* Is this the hardware default? If so skip. */
-			ret = regcache_lookup_reg(map, regtmp);
-			if (ret >= 0 && val == map->reg_defaults[ret].def)
-				continue;
-			map->cache_bypass = 1;
-			ret = _regmap_write(map, regtmp, val);
-			map->cache_bypass = 0;
-			if (ret)
-				return ret;
-			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
-				regtmp, val);
-		}
+		ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
+					  base, end);
+		if (ret != 0)
+			return ret;
 	}
-	return 0;
+	return regmap_async_complete(map);
 }
 struct regcache_ops regcache_rbtree_ops = {
...
@@ -45,8 +45,8 @@ static int regcache_hw_init(struct regmap *map)
 	tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
 	if (!tmp_buf)
 		return -EINVAL;
-	ret = regmap_bulk_read(map, 0, tmp_buf,
+	ret = regmap_raw_read(map, 0, tmp_buf,
 			      map->num_reg_defaults_raw);
 	map->cache_bypass = cache_bypass;
 	if (ret < 0) {
 		kfree(tmp_buf);
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)
 	/* calculate the size of reg_defaults */
 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
-		val = regcache_get_val(map->reg_defaults_raw,
-				       i, map->cache_word_size);
+		val = regcache_get_val(map, map->reg_defaults_raw, i);
 		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
 		count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
 	/* fill the reg_defaults */
 	map->num_reg_defaults = count;
 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
-		val = regcache_get_val(map->reg_defaults_raw,
-				       i, map->cache_word_size);
+		val = regcache_get_val(map, map->reg_defaults_raw, i);
 		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
 		map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
 	map->reg_defaults_raw = config->reg_defaults_raw;
 	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
 	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+	map->cache_present = NULL;
+	map->cache_present_nbits = 0;
 	map->cache = NULL;
 	map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)
 	BUG_ON(!map->cache_ops);
+	kfree(map->cache_present);
 	kfree(map->reg_defaults);
 	if (map->cache_free)
 		kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-bool regcache_set_val(void *base, unsigned int idx,
-		      unsigned int val, unsigned int word_size)
+int regcache_set_reg_present(struct regmap *map, unsigned int reg)
 {
-	switch (word_size) {
+	unsigned long *cache_present;
+	unsigned int cache_present_size;
+	unsigned int nregs;
+	int i;
+
+	nregs = reg + 1;
+	cache_present_size = BITS_TO_LONGS(nregs);
+	cache_present_size *= sizeof(long);
+
+	if (!map->cache_present) {
+		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
+		if (!cache_present)
+			return -ENOMEM;
+		bitmap_zero(cache_present, nregs);
+		map->cache_present = cache_present;
+		map->cache_present_nbits = nregs;
+	}
+
+	if (nregs > map->cache_present_nbits) {
+		cache_present = krealloc(map->cache_present,
+					 cache_present_size, GFP_KERNEL);
+		if (!cache_present)
+			return -ENOMEM;
+		for (i = 0; i < nregs; i++)
+			if (i >= map->cache_present_nbits)
+				clear_bit(i, cache_present);
+		map->cache_present = cache_present;
+		map->cache_present_nbits = nregs;
+	}
+
+	set_bit(reg, map->cache_present);
+	return 0;
+}
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+		      unsigned int val)
+{
+	if (regcache_get_val(map, base, idx) == val)
+		return true;
+
+	/* Use device native format if possible */
+	if (map->format.format_val) {
+		map->format.format_val(base + (map->cache_word_size * idx),
+				       val, 0);
+		return false;
+	}
+
+	switch (map->cache_word_size) {
 	case 1: {
 		u8 *cache = base;
-		if (cache[idx] == val)
-			return true;
 		cache[idx] = val;
 		break;
 	}
 	case 2: {
 		u16 *cache = base;
-		if (cache[idx] == val)
-			return true;
 		cache[idx] = val;
 		break;
 	}
 	case 4: {
 		u32 *cache = base;
-		if (cache[idx] == val)
-			return true;
 		cache[idx] = val;
 		break;
 	}
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
 	return false;
 }
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-			      unsigned int word_size)
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+			      unsigned int idx)
 {
 	if (!base)
 		return -EINVAL;
-	switch (word_size) {
+	/* Use device native format if possible */
+	if (map->format.parse_val)
+		return map->format.parse_val(regcache_get_val_addr(map, base,
+								   idx));
+
+	switch (map->cache_word_size) {
 	case 1: {
 		const u8 *cache = base;
 		return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
 	else
 		return -ENOENT;
 }
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+				      unsigned int block_base,
+				      unsigned int start, unsigned int end)
+{
+	unsigned int i, regtmp, val;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(map, regtmp))
+			continue;
+
+		val = regcache_get_val(map, block, i);
+
+		/* Is this the hardware default? If so skip. */
+		ret = regcache_lookup_reg(map, regtmp);
+		if (ret >= 0 && val == map->reg_defaults[ret].def)
+			continue;
+
+		map->cache_bypass = 1;
+		ret = _regmap_write(map, regtmp, val);
+		map->cache_bypass = 0;
+		if (ret != 0)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+			regtmp, val);
+	}
+
+	return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+					 unsigned int base, unsigned int cur)
+{
+	size_t val_bytes = map->format.val_bytes;
+	int ret, count;
+
+	if (*data == NULL)
+		return 0;
+
+	count = cur - base;
+
+	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+		count * val_bytes, count, base, cur - 1);
+
+	map->cache_bypass = 1;
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
+				false);
+	map->cache_bypass = 0;
+
+	*data = NULL;
+
+	return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+				   unsigned int block_base, unsigned int start,
+				   unsigned int end)
+{
+	unsigned int i, val;
+	unsigned int regtmp = 0;
+	unsigned int base = 0;
+	const void *data = NULL;
+	int ret;
+
+	for (i = start; i < end; i++) {
+		regtmp = block_base + (i * map->reg_stride);
+
+		if (!regcache_reg_present(map, regtmp)) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		val = regcache_get_val(map, block, i);
+
+		/* Is this the hardware default? If so skip. */
+		ret = regcache_lookup_reg(map, regtmp);
+		if (ret >= 0 && val == map->reg_defaults[ret].def) {
+			ret = regcache_sync_block_raw_flush(map, &data,
+							    base, regtmp);
+			if (ret != 0)
+				return ret;
+			continue;
+		}
+
+		if (!data) {
+			data = regcache_get_val_addr(map, block, i);
+			base = regtmp;
+		}
+	}
+
+	return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+			unsigned int block_base, unsigned int start,
+			unsigned int end)
+{
+	if (regmap_can_raw_write(map))
+		return regcache_sync_block_raw(map, block, block_base,
+					       start, end);
+	else
+		return regcache_sync_block_single(map, block, block_base,
+						  start, end);
+}
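
regcache_sync_block() is the piece the rbtree sync above now delegates to: it walks a contiguous block of cached values, skips registers that are not marked present or still hold their hardware default, and either writes them one at a time or coalesces runs into raw block writes. A sketch of how another cache backend could hand its storage to it, assuming a hypothetical flat-array backend (not part of this series) that keeps all values from register 0 in one block:

	/* Hypothetical backend data, for illustration only. */
	struct flat_cache_example {
		void *values;		/* values stored in device native format */
		unsigned int n_regs;	/* number of cached registers */
	};

	static int flat_cache_example_sync(struct regmap *map, unsigned int min,
					   unsigned int max)
	{
		struct flat_cache_example *fc = map->cache;

		/* Block starts at register 0, so block indices map directly to
		 * registers; sync the whole block for simplicity. */
		return regcache_sync_block(map, fc->values, 0, 0, fc->n_regs);
	}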
@@ -88,16 +88,16 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 	 * If we don't have a cache build one so we don't have to do a
 	 * linear scan each time.
 	 */
+	mutex_lock(&map->cache_lock);
+	i = base;
 	if (list_empty(&map->debugfs_off_cache)) {
-		for (i = base; i <= map->max_register; i += map->reg_stride) {
+		for (; i <= map->max_register; i += map->reg_stride) {
 			/* Skip unprinted registers, closing off cache entry */
 			if (!regmap_readable(map, i) ||
 			    regmap_precious(map, i)) {
 				if (c) {
 					c->max = p - 1;
-					fpos_offset = c->max - c->min;
-					reg_offset = fpos_offset / map->debugfs_tot_len;
-					c->max_reg = c->base_reg + reg_offset;
+					c->max_reg = i - map->reg_stride;
 					list_add_tail(&c->list,
 						      &map->debugfs_off_cache);
 					c = NULL;
@@ -111,6 +111,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 				c = kzalloc(sizeof(*c), GFP_KERNEL);
 				if (!c) {
 					regmap_debugfs_free_dump_cache(map);
+					mutex_unlock(&map->cache_lock);
 					return base;
 				}
 				c->min = p;
@@ -124,9 +125,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 		/* Close the last entry off if we didn't scan beyond it */
 		if (c) {
 			c->max = p - 1;
-			fpos_offset = c->max - c->min;
-			reg_offset = fpos_offset / map->debugfs_tot_len;
-			c->max_reg = c->base_reg + reg_offset;
+			c->max_reg = i - map->reg_stride;
 			list_add_tail(&c->list,
 				      &map->debugfs_off_cache);
 		}
@@ -145,12 +144,14 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 			fpos_offset = from - c->min;
 			reg_offset = fpos_offset / map->debugfs_tot_len;
 			*pos = c->min + (reg_offset * map->debugfs_tot_len);
+			mutex_unlock(&map->cache_lock);
 			return c->base_reg + reg_offset;
 		}
 		*pos = c->max;
 		ret = c->max_reg;
 	}
+	mutex_unlock(&map->cache_lock);
 	return ret;
 }
@@ -311,6 +312,79 @@ static const struct file_operations regmap_range_fops = {
 	.llseek = default_llseek,
 };
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+					   char __user *user_buf, size_t count,
+					   loff_t *ppos)
+{
+	struct regmap *map = file->private_data;
+	struct regmap_debugfs_off_cache *c;
+	loff_t p = 0;
+	size_t buf_pos = 0;
+	char *buf;
+	char *entry;
+	int ret;
+
+	if (*ppos < 0 || !count)
+		return -EINVAL;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!entry) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* While we are at it, build the register dump cache
+	 * now so the read() operation on the `registers' file
+	 * can benefit from using the cache. We do not care
+	 * about the file position information that is contained
+	 * in the cache, just about the actual register blocks */
+	regmap_calc_tot_len(map, buf, count);
+	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+	/* Reset file pointer as the fixed-format of the `registers'
+	 * file is not compatible with the `range' file */
+	p = 0;
+	mutex_lock(&map->cache_lock);
+	list_for_each_entry(c, &map->debugfs_off_cache, list) {
+		snprintf(entry, PAGE_SIZE, "%x-%x",
+			 c->base_reg, c->max_reg);
+		if (p >= *ppos) {
+			if (buf_pos + 1 + strlen(entry) > count)
+				break;
+			snprintf(buf + buf_pos, count - buf_pos,
+				 "%s", entry);
+			buf_pos += strlen(entry);
+			buf[buf_pos] = '\n';
+			buf_pos++;
+		}
+		p += strlen(entry) + 1;
+	}
+	mutex_unlock(&map->cache_lock);
+
+	kfree(entry);
+	ret = buf_pos;
+
+	if (copy_to_user(user_buf, buf, buf_pos)) {
+		ret = -EFAULT;
+		goto out_buf;
+	}
+
+	*ppos += buf_pos;
+out_buf:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+	.open = simple_open,
+	.read = regmap_reg_ranges_read_file,
+	.llseek = default_llseek,
+};
+
 static ssize_t regmap_access_read_file(struct file *file,
 				       char __user *user_buf, size_t count,
 				       loff_t *ppos)
@@ -385,6 +459,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	struct regmap_range_node *range_node;
 	INIT_LIST_HEAD(&map->debugfs_off_cache);
+	mutex_init(&map->cache_lock);
 	if (name) {
 		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
@@ -403,6 +478,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	debugfs_create_file("name", 0400, map->debugfs,
 			    map, &regmap_name_fops);
+	debugfs_create_file("range", 0400, map->debugfs,
+			    map, &regmap_reg_ranges_fops);
+
 	if (map->max_register) {
 		debugfs_create_file("registers", 0400, map->debugfs,
 				    map, &regmap_map_fops);
@@ -435,7 +513,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 void regmap_debugfs_exit(struct regmap *map)
 {
 	debugfs_remove_recursive(map->debugfs);
+	mutex_lock(&map->cache_lock);
 	regmap_debugfs_free_dump_cache(map);
+	mutex_unlock(&map->cache_lock);
 	kfree(map->debugfs_name);
 }
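
The new `range' file complements the existing `registers' dump: each line is a base-max pair of register addresses printed in hex by the snprintf() above, one line per contiguous block of readable, non-precious registers. Purely as an illustration (the register numbers are made up, not taken from any real device), a map with two such blocks would read back as:

	0-1f
	40-4f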
...
@@ -460,7 +460,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
 				   chip->name, d);
 	if (ret != 0) {
-		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+			irq, chip->name, ret);
 		goto err_domain;
 	}
...
@@ -228,30 +228,39 @@ static void regmap_format_32_native(void *buf, unsigned int val,
 	*(u32 *)buf = val << shift;
 }
-static unsigned int regmap_parse_8(void *buf)
+static void regmap_parse_inplace_noop(void *buf)
 {
-	u8 *b = buf;
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+	const u8 *b = buf;
 	return b[0];
 }
-static unsigned int regmap_parse_16_be(void *buf)
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+	const __be16 *b = buf;
+	return be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
 {
 	__be16 *b = buf;
 	b[0] = be16_to_cpu(b[0]);
-	return b[0];
 }
-static unsigned int regmap_parse_16_native(void *buf)
+static unsigned int regmap_parse_16_native(const void *buf)
 {
 	return *(u16 *)buf;
 }
-static unsigned int regmap_parse_24(void *buf)
+static unsigned int regmap_parse_24(const void *buf)
 {
-	u8 *b = buf;
+	const u8 *b = buf;
 	unsigned int ret = b[2];
 	ret |= ((unsigned int)b[1]) << 8;
 	ret |= ((unsigned int)b[0]) << 16;
@@ -259,16 +268,21 @@ static unsigned int regmap_parse_24(void *buf)
 	return ret;
 }
-static unsigned int regmap_parse_32_be(void *buf)
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+	const __be32 *b = buf;
+	return be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
 {
 	__be32 *b = buf;
 	b[0] = be32_to_cpu(b[0]);
-	return b[0];
 }
-static unsigned int regmap_parse_32_native(void *buf)
+static unsigned int regmap_parse_32_native(const void *buf)
 {
 	return *(u32 *)buf;
 }
@@ -555,16 +569,21 @@ struct regmap *regmap_init(struct device *dev,
 		goto err_map;
 	}
+	if (val_endian == REGMAP_ENDIAN_NATIVE)
+		map->format.parse_inplace = regmap_parse_inplace_noop;
+
 	switch (config->val_bits) {
 	case 8:
 		map->format.format_val = regmap_format_8;
 		map->format.parse_val = regmap_parse_8;
+		map->format.parse_inplace = regmap_parse_inplace_noop;
 		break;
 	case 16:
 		switch (val_endian) {
 		case REGMAP_ENDIAN_BIG:
 			map->format.format_val = regmap_format_16_be;
 			map->format.parse_val = regmap_parse_16_be;
+			map->format.parse_inplace = regmap_parse_16_be_inplace;
 			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_16_native;
@@ -585,6 +604,7 @@ struct regmap *regmap_init(struct device *dev,
 		case REGMAP_ENDIAN_BIG:
 			map->format.format_val = regmap_format_32_be;
 			map->format.parse_val = regmap_parse_32_be;
+			map->format.parse_inplace = regmap_parse_32_be_inplace;
 			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_32_native;
@@ -917,8 +937,8 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 	return 0;
 }
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
-			     const void *val, size_t val_len, bool async)
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len, bool async)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -930,7 +950,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	size_t len;
 	int i;
-	BUG_ON(!map->bus);
+	WARN_ON(!map->bus);
 	/* Check for unwritable registers before we start */
 	if (map->writeable_reg)
@@ -943,8 +963,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		unsigned int ival;
 		int val_bytes = map->format.val_bytes;
 		for (i = 0; i < val_len / val_bytes; i++) {
-			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-			ival = map->format.parse_val(map->work_buf);
+			ival = map->format.parse_val(val + (i * val_bytes));
 			ret = regcache_write(map, reg + (i * map->reg_stride),
 					     ival);
 			if (ret) {
@@ -999,6 +1018,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		if (!async)
 			return -ENOMEM;
+		trace_regmap_async_write_start(map->dev, reg, val_len);
+
 		async->work_buf = kzalloc(map->format.buf_size,
 					  GFP_KERNEL | GFP_DMA);
 		if (!async->work_buf) {
@@ -1079,6 +1100,17 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	return ret;
 }
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+	return map->bus && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 				       unsigned int val)
 {
@@ -1086,7 +1118,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 	struct regmap_range_node *range;
 	struct regmap *map = context;
-	BUG_ON(!map->bus || !map->format.format_write);
+	WARN_ON(!map->bus || !map->format.format_write);
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
@@ -1112,7 +1144,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 {
 	struct regmap *map = context;
-	BUG_ON(!map->bus || !map->format.format_val);
+	WARN_ON(!map->bus || !map->format.format_val);
 	map->format.format_val(map->work_buf + map->format.reg_bytes
 			       + map->format.pad_bytes, val, 0);
@@ -1202,12 +1234,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
 	int ret;
-	if (!map->bus)
+	if (!regmap_can_raw_write(map))
 		return -EINVAL;
 	if (val_len % map->format.val_bytes)
 		return -EINVAL;
-	if (reg % map->reg_stride)
-		return -EINVAL;
 	map->lock(map->lock_arg);
@@ -1242,7 +1272,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 	if (!map->bus)
 		return -EINVAL;
-	if (!map->format.parse_val)
+	if (!map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
@@ -1260,7 +1290,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			goto out;
 		}
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
-			map->format.parse_val(wval + i);
+			map->format.parse_inplace(wval + i);
 	}
 	/*
 	 * Some devices does not support bulk write, for
@@ -1338,7 +1368,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	u8 *u8 = map->work_buf;
 	int ret;
-	BUG_ON(!map->bus);
+	WARN_ON(!map->bus);
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
@@ -1393,7 +1423,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	int ret;
 	void *context = _regmap_map_get_context(map);
-	BUG_ON(!map->reg_read);
+	WARN_ON(!map->reg_read);
 	if (!map->cache_bypass) {
 		ret = regcache_read(map, reg, val);
@@ -1521,7 +1551,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 	if (!map->bus)
 		return -EINVAL;
-	if (!map->format.parse_val)
+	if (!map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
@@ -1548,7 +1578,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 		}
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
-			map->format.parse_val(val + i);
+			map->format.parse_inplace(val + i);
 	} else {
 		for (i = 0; i < val_count; i++) {
 			unsigned int ival;
@@ -1642,6 +1672,8 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
+	trace_regmap_async_io_complete(map->dev);
+
 	spin_lock(&map->async_lock);
 	list_del(&async->list);
@@ -1688,6 +1720,8 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus->async_write)
 		return 0;
+	trace_regmap_async_complete_start(map->dev);
+
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 	spin_lock_irqsave(&map->async_lock, flags);
@@ -1695,6 +1729,8 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
+	trace_regmap_async_complete_done(map->dev);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_async_complete);
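
regmap_can_raw_write() is exported so callers can check up front whether the map's bus and format support raw block transfers, the same test the cache sync code uses before taking the raw path. A minimal, hypothetical use in a driver (my_dev, MY_COEFF_BASE and the coefficient buffer are made-up names, not from this series):

	/* Download a block of big-endian coefficients in one raw write if the
	 * bus supports it; callers fall back to regmap_write() otherwise. */
	static int my_dev_load_coeffs(struct my_dev *priv,
				      const __be16 *coeffs, size_t len)
	{
		if (!regmap_can_raw_write(priv->regmap))
			return -EOPNOTSUPP;

		return regmap_raw_write(priv->regmap, MY_COEFF_BASE,
					coeffs, len * sizeof(*coeffs));
	}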
...
@@ -389,6 +389,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 			     bool *change);
 int regmap_get_val_bytes(struct regmap *map);
 int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
 int regcache_sync(struct regmap *map);
 int regcache_sync_region(struct regmap *map, unsigned int min,
...
@@ -175,6 +175,54 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 );
+DECLARE_EVENT_CLASS(regmap_async,
+	TP_PROTO(struct device *dev),
+	TP_ARGS(dev),
+	TP_STRUCT__entry(
+		__string(	name,		dev_name(dev)	)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+	),
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+	TP_PROTO(struct device *dev, unsigned int reg, int count),
+	TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+	TP_PROTO(struct device *dev),
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+	TP_PROTO(struct device *dev),
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+	TP_PROTO(struct device *dev),
+	TP_ARGS(dev)
+);
+
 #endif /* _TRACE_REGMAP_H */
 /* This part must be outside protection */
...