Commit 39b14286 authored by Linus Torvalds

Merge tag 'regmap-fix-v6.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap fixes from Mark Brown:
 "Three fixes here:

   - The issues with accounting for register and padding length on raw
     buses turn out to be quite widespread in custom buses.

     In order to avoid disturbing anything, drop the initial fixes and
     fall back to a point fix in the SMBus code where the issue was
     originally noticed; a more substantial refactoring of the API,
     ensuring that all buses make the same assumptions, will follow
     (the length accounting is sketched after the regmap-i2c.c and
     regmap.c hunks below).

   - The generic regcache code had been forcing async I/O on, which did
     not work with the new maple tree sync code when used with SPI.

     Since that was mainly there for the rbtree cache, and the
     assumptions about hardware that drove the choice are probably no
     longer true, fix this by pushing the enablement of async down into
     the rbtree code (the resulting pattern is sketched after the
     regcache.c hunks below).

     This probably also makes cache syncs faster for systems using
     other cache types, though that is not the point.

   - The test code was triggering use of the rbtree and maple tree
     caches with dynamic allocation of nodes, since all the testing is
     done with RAM-backed caches that have no I/O performance issues.

     Just disable the locking in the tests to avoid triggering warnings
     when allocation debugging is turned on; the locking is not really
     what's being tested (a sample test configuration is sketched after
     the regmap-kunit.c hunks below)"

* tag 'regmap-fix-v6.5-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap:
  regmap: Disable locking for RBTREE and MAPLE unit tests
  regcache: Push async I/O request down into the rbtree cache
  regmap: Account for register length in SMBus I/O limits
  regmap: Drop initial version of maximum transfer length fixes
parents c0842db5 a9e26169
drivers/base/regmap/regcache-rbtree.c
@@ -471,6 +471,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 	unsigned int start, end;
 	int ret;
 
+	map->async = true;
+
 	rbtree_ctx = map->cache;
 	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
 		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
@@ -499,6 +501,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
 			return ret;
 	}
 
+	map->async = false;
+
 	return regmap_async_complete(map);
 }
drivers/base/regmap/regcache.c
@@ -368,8 +368,6 @@ int regcache_sync(struct regmap *map)
 	if (!map->cache_dirty)
 		goto out;
 
-	map->async = true;
-
 	/* Apply any patch first */
 	map->cache_bypass = true;
 	for (i = 0; i < map->patch_regs; i++) {
@@ -392,7 +390,6 @@ int regcache_sync(struct regmap *map)
 out:
 	/* Restore the bypass state */
-	map->async = false;
 	map->cache_bypass = bypass;
 	map->no_sync_defaults = false;
 	map->unlock(map->lock_arg);
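The net effect of the regcache.c hunks above, together with the
regcache-rbtree.c hunks before them, is that the async bracket moves out
of the generic sync path and into the one cache type that wants it. A
minimal sketch of the resulting pattern, with my_cache_sync() and
my_sync_blocks() as hypothetical stand-ins rather than functions from
the tree:

    /*
     * Hedged sketch of the async bracketing pattern that now lives in
     * the rbtree cache's sync op; my_cache_sync() and my_sync_blocks()
     * are hypothetical names, not kernel functions.
     */
    static int my_cache_sync(struct regmap *map, unsigned int min,
                             unsigned int max)
    {
            int ret;

            map->async = true;      /* queue writes instead of waiting */
            ret = my_sync_blocks(map, min, max);
            map->async = false;     /* subsequent writes wait again */
            if (ret != 0)
                    return ret;

            /* block until every queued async write has completed */
            return regmap_async_complete(map);
    }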
drivers/base/regmap/regmap-i2c.c
@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
 static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
 	.write = regmap_i2c_smbus_i2c_write,
 	.read = regmap_i2c_smbus_i2c_read,
-	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
-	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
+	.max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
+	.max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
 };
 
 static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
 static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
 	.write = regmap_i2c_smbus_i2c_write_reg16,
 	.read = regmap_i2c_smbus_i2c_read_reg16,
-	.max_raw_read = I2C_SMBUS_BLOCK_MAX,
-	.max_raw_write = I2C_SMBUS_BLOCK_MAX,
+	.max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
+	.max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
 };
 
 static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
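The - 1 and - 2 above fall out of simple arithmetic: an SMBus block
transfer carries at most I2C_SMBUS_BLOCK_MAX bytes in total, and the
register address bytes are part of that budget, so the raw payload
limit shrinks by the register length. A runnable user-space sketch of
the accounting (the constant value matches the SMBus limit used by the
kernel uapi headers):

    #include <stdio.h>

    #define I2C_SMBUS_BLOCK_MAX 32  /* SMBus block transfer limit, bytes */

    int main(void)
    {
            /* usable payload = block limit minus register address bytes */
            printf("8-bit registers:  %d payload bytes\n",
                   I2C_SMBUS_BLOCK_MAX - 1);   /* 31 */
            printf("16-bit registers: %d payload bytes\n",
                   I2C_SMBUS_BLOCK_MAX - 2);   /* 30 */
            return 0;
    }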
drivers/base/regmap/regmap-kunit.c
@@ -58,6 +58,9 @@ static struct regmap *gen_regmap(struct regmap_config *config,
 	int i;
 	struct reg_default *defaults;
 
+	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
+					config->cache_type == REGCACHE_MAPLE;
+
 	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
@@ -889,6 +892,8 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
 	config->cache_type = test_type->cache_type;
 	config->val_format_endian = test_type->val_endian;
+	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
+					config->cache_type == REGCACHE_MAPLE;
 
 	buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
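disable_locking is an existing regmap_config field; setting it here
keeps the rbtree and maple tree node allocations out from under
regmap's internal lock, so allocation debugging has nothing to warn
about. A hedged sketch of a test-only configuration (the field values
are assumptions for illustration, not taken from the tests):

    /*
     * Hedged sketch: only safe because the KUnit regmap tests run
     * single-threaded against a RAM-backed cache; real drivers must
     * leave locking enabled.
     */
    static const struct regmap_config test_config = {
            .reg_bits = 8,              /* assumed register width */
            .val_bits = 8,              /* assumed value width */
            .max_register = 0x2f,       /* assumed map size */
            .cache_type = REGCACHE_MAPLE,
            .disable_locking = true,    /* no allocation-under-lock warnings */
    };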
drivers/base/regmap/regmap-spi-avmm.c
@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
 	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
 	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
-	.max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
 	.free_context = spi_avmm_bridge_ctx_free,
 };
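The spi-avmm change is part of the same revert described in the pull
message: the initial fix had grown max_raw_write by the register size
so that header bytes would fit inside the limit, and dropping that
restores the payload-only meaning other buses assume. Illustrative
arithmetic with assumed constants (not taken from the driver):

    #include <stdio.h>

    /* assumed values, for illustration only */
    #define SPI_AVMM_REG_SIZE 4
    #define SPI_AVMM_VAL_SIZE 4
    #define MAX_WRITE_CNT     1

    int main(void)
    {
            printf("initial fix:  max_raw_write = %d\n",
                   SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT);
            printf("after revert: max_raw_write = %d\n",
                   SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT);
            return 0;
    }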
drivers/base/regmap/regmap.c
@@ -2082,8 +2082,6 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	size_t val_count = val_len / val_bytes;
 	size_t chunk_count, chunk_bytes;
 	size_t chunk_regs = val_count;
-	size_t max_data = map->max_raw_write - map->format.reg_bytes -
-			  map->format.pad_bytes;
 	int ret, i;
 
 	if (!val_count)
@@ -2091,8 +2089,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	if (map->use_single_write)
 		chunk_regs = 1;
-	else if (map->max_raw_write && val_len > max_data)
-		chunk_regs = max_data / val_bytes;
+	else if (map->max_raw_write && val_len > map->max_raw_write)
+		chunk_regs = map->max_raw_write / val_bytes;
 
 	chunk_count = val_count / chunk_regs;
 	chunk_bytes = chunk_regs * val_bytes;
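With max_data gone, _regmap_raw_write() is back to treating
max_raw_write as a limit on payload bytes alone when it splits a large
write into chunks. A runnable sketch of the chunking arithmetic, with
example numbers assumed for illustration:

    #include <stdio.h>

    int main(void)
    {
            /* assumed: 2-byte values, 100 registers, 64-byte bus limit */
            size_t val_bytes = 2;
            size_t val_count = 100;
            size_t val_len = val_count * val_bytes;   /* 200 bytes wanted */
            size_t max_raw_write = 64;

            size_t chunk_regs = val_count;
            if (val_len > max_raw_write)
                    chunk_regs = max_raw_write / val_bytes; /* 32 regs */

            printf("%zu full chunks of %zu bytes, %zu registers left\n",
                   val_count / chunk_regs,      /* 3 */
                   chunk_regs * val_bytes,      /* 64 */
                   val_count % chunk_regs);     /* 4 */
            return 0;
    }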