Commit 8faea9fd authored by Jason Gunthorpe

RDMA/cache: Move the cache per-port data into the main ib_port_data

Like the other cases, there is no real reason to have another array just
for the cache. This larger conversion gets its own patch.
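
For example, the per-port GID table pointer is now reached through the
per-port data instead of a separate, zero-based cache array:

	/* before */
	device->cache.ports[port - rdma_start_port(device)].gid
	/* after */
	device->port_data[port].cache.gid

The open-coded port loops become rdma_for_each_port() iterations, which
also drops the "port - rdma_start_port(device)" index arithmetic.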
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8ceb1357
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 
 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
 {
-	return device->cache.ports[port - rdma_start_port(device)].gid;
+	return device->port_data[port].cache.gid;
 }
 
 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
@@ -765,7 +765,7 @@ static struct ib_gid_table *alloc_gid_table(int sz)
 	return NULL;
 }
 
-static void release_gid_table(struct ib_device *device, u8 port,
+static void release_gid_table(struct ib_device *device,
 			      struct ib_gid_table *table)
 {
 	bool leak = false;
@@ -863,31 +863,27 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		release_gid_table(ib_dev, port, table);
-		ib_dev->cache.ports[port].gid = NULL;
+	rdma_for_each_port (ib_dev, p) {
+		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
+		ib_dev->port_data[p].cache.gid = NULL;
 	}
 }
 
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
-	u8 port;
 	struct ib_gid_table *table;
+	unsigned int rdma_port;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		u8 rdma_port = port + rdma_start_port(ib_dev);
-
+	rdma_for_each_port (ib_dev, rdma_port) {
 		table = alloc_gid_table(
 			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
 		if (!table)
 			goto rollback_table_setup;
 
 		gid_table_reserve_default(ib_dev, rdma_port, table);
-		ib_dev->cache.ports[port].gid = table;
+		ib_dev->port_data[rdma_port].cache.gid = table;
 	}
 	return 0;
@@ -898,14 +894,11 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table);
-	}
+	rdma_for_each_port (ib_dev, p)
+		cleanup_gid_table_port(ib_dev, p,
+				       ib_dev->port_data[p].cache.gid);
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -983,17 +976,17 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-	u8 p;
+	unsigned int p;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
 
-	for (p = 0; p < device->phys_port_cnt; p++) {
+	rdma_for_each_port(device, p) {
 		struct ib_gid_table *table;
 		unsigned long flags;
 		int index;
 
-		table = device->cache.ports[p].gid;
+		table = device->port_data[p].cache.gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
 		if (index >= 0) {
@@ -1025,7 +1018,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -1043,14 +1036,12 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
 			       u64 *sn_pfx)
 {
 	unsigned long flags;
-	int p;
 
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	p = port_num - rdma_start_port(device);
 	read_lock_irqsave(&device->cache.lock, flags);
-	*sn_pfx = device->cache.ports[p].subnet_prefix;
+	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return 0;
@@ -1073,7 +1064,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
@@ -1113,7 +1104,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
@@ -1141,7 +1132,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
+	*lmc = device->port_data[port_num].cache.lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1159,8 +1150,7 @@ int ib_get_cached_port_state(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.ports[port_num
-		- rdma_start_port(device)].port_state;
+	*port_state = device->port_data[port_num].cache.port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1361,16 +1351,13 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.ports[port -
-					     rdma_start_port(device)].pkey;
+	old_pkey_cache = device->port_data[port].cache.pkey;
 
-	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
-	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
-	device->cache.ports[port - rdma_start_port(device)].port_state =
-		tprops->state;
+	device->port_data[port].cache.pkey = pkey_cache;
+	device->port_data[port].cache.lmc = tprops->lmc;
+	device->port_data[port].cache.port_state = tprops->state;
 
-	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
-		tprops->subnet_prefix;
+	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
 	write_unlock_irq(&device->cache.lock);
 
 	if (enforce_security)
@@ -1433,19 +1420,9 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.ports =
-		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
-			sizeof(*device->cache.ports),
-			GFP_KERNEL);
-	if (!device->cache.ports)
-		return -ENOMEM;
-
 	err = gid_table_setup_one(device);
-	if (err) {
-		kfree(device->cache.ports);
-		device->cache.ports = NULL;
+	if (err)
 		return err;
-	}
 
 	rdma_for_each_port (device, p)
 		ib_cache_update(device, p, true);
@@ -1458,10 +1435,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 void ib_cache_release_one(struct ib_device *device)
 {
-	int p;
-
-	if (!device->cache.ports)
-		return;
+	unsigned int p;
 
 	/*
 	 * The release function frees all the cache elements.
@@ -1469,11 +1443,10 @@ void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
	 * longer be accessed.
 	 */
-	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-		kfree(device->cache.ports[p].pkey);
+	rdma_for_each_port (device, p)
+		kfree(device->port_data[p].cache.pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2186,7 +2186,6 @@ struct ib_port_cache {
 struct ib_cache {
 	rwlock_t                lock;
 	struct ib_event_handler event_handler;
-	struct ib_port_cache   *ports;
 };
 
 struct iw_cm_verbs;
@@ -2203,6 +2202,8 @@ struct ib_port_data {
 	spinlock_t pkey_list_lock;
 	struct list_head pkey_list;
+
+	struct ib_port_cache cache;
 };
 
 /* rdma netdev type - specifies protocol type */
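For context, a minimal consumer-side sketch of the layout after this change
(illustrative only; my_dump_pkeys() is a hypothetical helper, not part of the
patch):

	/*
	 * Illustrative sketch, not part of the commit: walk each port with
	 * rdma_for_each_port() and read the first cached PKey, which now
	 * lives in device->port_data[port].cache.pkey.
	 */
	static void my_dump_pkeys(struct ib_device *device)
	{
		unsigned int p;

		rdma_for_each_port(device, p) {
			u16 pkey;

			/* ib_get_cached_pkey() returns 0 on success */
			if (!ib_get_cached_pkey(device, p, 0, &pkey))
				pr_info("%s port %u: pkey[0] = 0x%04x\n",
					dev_name(&device->dev), p, pkey);
		}
	}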