Commit 39178585 authored by Dan Williams

cxl/port: Move dport tracking to an xarray

Reduce the complexity and the overhead of walking the topology to
determine endpoint connectivity to root decoder interleave
configurations.

Note that cxl_detach_ep(), after it determines that the last @ep has
departed and decides to delete the port, now needs to walk the dport
array with the device_lock() held to remove entries. Previously
list_splice_init() could be used to atomically delete all dport entries at
once and then perform entry tear down outside the lock. There is no
list_splice_init() equivalent for the xarray.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784331647.1758207.6345820282285119339.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 256d0e9e
...@@ -50,8 +50,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, ...@@ -50,8 +50,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int devm_cxl_add_passthrough_decoder(struct cxl_port *port) int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{ {
struct cxl_switch_decoder *cxlsd; struct cxl_switch_decoder *cxlsd;
struct cxl_dport *dport; struct cxl_dport *dport = NULL;
int single_port_map[1]; int single_port_map[1];
unsigned long index;
cxlsd = cxl_switch_decoder_alloc(port, 1); cxlsd = cxl_switch_decoder_alloc(port, 1);
if (IS_ERR(cxlsd)) if (IS_ERR(cxlsd))
...@@ -59,7 +60,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) ...@@ -59,7 +60,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev); device_lock_assert(&port->dev);
dport = list_first_entry(&port->dports, typeof(*dport), list); xa_for_each(&port->dports, index, dport)
break;
single_port_map[0] = dport->port_id; single_port_map[0] = dport->port_id;
return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
......
...@@ -452,6 +452,7 @@ static void cxl_port_release(struct device *dev) ...@@ -452,6 +452,7 @@ static void cxl_port_release(struct device *dev)
xa_for_each(&port->endpoints, index, ep) xa_for_each(&port->endpoints, index, ep)
cxl_ep_remove(port, ep); cxl_ep_remove(port, ep);
xa_destroy(&port->endpoints); xa_destroy(&port->endpoints);
xa_destroy(&port->dports);
ida_free(&cxl_port_ida, port->id); ida_free(&cxl_port_ida, port->id);
kfree(port); kfree(port);
} }
...@@ -581,7 +582,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, ...@@ -581,7 +582,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
port->component_reg_phys = component_reg_phys; port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida); ida_init(&port->decoder_ida);
port->hdm_end = -1; port->hdm_end = -1;
INIT_LIST_HEAD(&port->dports); xa_init(&port->dports);
xa_init(&port->endpoints); xa_init(&port->endpoints);
device_initialize(dev); device_initialize(dev);
...@@ -711,17 +712,13 @@ static int match_root_child(struct device *dev, const void *match) ...@@ -711,17 +712,13 @@ static int match_root_child(struct device *dev, const void *match)
return 0; return 0;
port = to_cxl_port(dev); port = to_cxl_port(dev);
device_lock(dev); iter = match;
list_for_each_entry(dport, &port->dports, list) { while (iter) {
iter = match; dport = cxl_find_dport_by_dev(port, iter);
while (iter) { if (dport)
if (iter == dport->dport) break;
goto out; iter = iter->parent;
iter = iter->parent;
}
} }
out:
device_unlock(dev);
return !!iter; return !!iter;
} }
...@@ -745,9 +742,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); ...@@ -745,9 +742,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id) static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{ {
struct cxl_dport *dport; struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev); device_lock_assert(&port->dev);
list_for_each_entry (dport, &port->dports, list) xa_for_each(&port->dports, index, dport)
if (dport->port_id == id) if (dport->port_id == id)
return dport; return dport;
return NULL; return NULL;
...@@ -759,15 +757,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new) ...@@ -759,15 +757,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new)
device_lock_assert(&port->dev); device_lock_assert(&port->dev);
dup = find_dport(port, new->port_id); dup = find_dport(port, new->port_id);
if (dup) if (dup) {
dev_err(&port->dev, dev_err(&port->dev,
"unable to add dport%d-%s non-unique port id (%s)\n", "unable to add dport%d-%s non-unique port id (%s)\n",
new->port_id, dev_name(new->dport), new->port_id, dev_name(new->dport),
dev_name(dup->dport)); dev_name(dup->dport));
else return -EBUSY;
list_add_tail(&new->list, &port->dports); }
return xa_insert(&port->dports, (unsigned long)new->dport, new,
return dup ? -EEXIST : 0; GFP_KERNEL);
} }
/* /*
...@@ -794,10 +792,8 @@ static void cxl_dport_remove(void *data) ...@@ -794,10 +792,8 @@ static void cxl_dport_remove(void *data)
struct cxl_dport *dport = data; struct cxl_dport *dport = data;
struct cxl_port *port = dport->port; struct cxl_port *port = dport->port;
xa_erase(&port->dports, (unsigned long) dport->dport);
put_device(dport->dport); put_device(dport->dport);
cond_cxl_root_lock(port);
list_del(&dport->list);
cond_cxl_root_unlock(port);
} }
static void cxl_dport_unlink(void *data) static void cxl_dport_unlink(void *data)
...@@ -849,7 +845,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, ...@@ -849,7 +845,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
if (!dport) if (!dport)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&dport->list);
dport->dport = dport_dev; dport->dport = dport_dev;
dport->port_id = port_id; dport->port_id = port_id;
dport->component_reg_phys = component_reg_phys; dport->component_reg_phys = component_reg_phys;
...@@ -1040,19 +1035,27 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL); ...@@ -1040,19 +1035,27 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
* for a port to be unregistered is when all memdevs beneath that port have gone * for a port to be unregistered is when all memdevs beneath that port have gone
* through ->remove(). This "bottom-up" removal selectively removes individual * through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() to not change is * child ports manually. This depends on devm_cxl_add_port() to not change is
* devm action registration order. * devm action registration order, and for dports to have already been
* destroyed by reap_dports().
*/ */
static void delete_switch_port(struct cxl_port *port, struct list_head *dports) static void delete_switch_port(struct cxl_port *port)
{
devm_release_action(port->dev.parent, cxl_unlink_uport, port);
devm_release_action(port->dev.parent, unregister_port, port);
}
static void reap_dports(struct cxl_port *port)
{ {
struct cxl_dport *dport, *_d; struct cxl_dport *dport;
unsigned long index;
list_for_each_entry_safe(dport, _d, dports, list) { device_lock_assert(&port->dev);
xa_for_each(&port->dports, index, dport) {
devm_release_action(&port->dev, cxl_dport_unlink, dport); devm_release_action(&port->dev, cxl_dport_unlink, dport);
devm_release_action(&port->dev, cxl_dport_remove, dport); devm_release_action(&port->dev, cxl_dport_remove, dport);
devm_kfree(&port->dev, dport); devm_kfree(&port->dev, dport);
} }
devm_release_action(port->dev.parent, cxl_unlink_uport, port);
devm_release_action(port->dev.parent, unregister_port, port);
} }
static struct cxl_ep *cxl_ep_load(struct cxl_port *port, static struct cxl_ep *cxl_ep_load(struct cxl_port *port,
...@@ -1069,8 +1072,8 @@ static void cxl_detach_ep(void *data) ...@@ -1069,8 +1072,8 @@ static void cxl_detach_ep(void *data)
for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) { for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) {
struct device *dport_dev = grandparent(iter); struct device *dport_dev = grandparent(iter);
struct cxl_port *port, *parent_port; struct cxl_port *port, *parent_port;
LIST_HEAD(reap_dports);
struct cxl_ep *ep; struct cxl_ep *ep;
bool died = false;
if (!dport_dev) if (!dport_dev)
break; break;
...@@ -1110,15 +1113,16 @@ static void cxl_detach_ep(void *data) ...@@ -1110,15 +1113,16 @@ static void cxl_detach_ep(void *data)
* enumerated port. Block new cxl_add_ep() and garbage * enumerated port. Block new cxl_add_ep() and garbage
* collect the port. * collect the port.
*/ */
died = true;
port->dead = true; port->dead = true;
list_splice_init(&port->dports, &reap_dports); reap_dports(port);
} }
device_unlock(&port->dev); device_unlock(&port->dev);
if (!list_empty(&reap_dports)) { if (died) {
dev_dbg(&cxlmd->dev, "delete %s\n", dev_dbg(&cxlmd->dev, "delete %s\n",
dev_name(&port->dev)); dev_name(&port->dev));
delete_switch_port(port, &reap_dports); delete_switch_port(port);
} }
put_device(&port->dev); put_device(&port->dev);
device_unlock(&parent_port->dev); device_unlock(&parent_port->dev);
...@@ -1297,23 +1301,6 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, ...@@ -1297,23 +1301,6 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
} }
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
const struct device *dev)
{
struct cxl_dport *dport;
device_lock(&port->dev);
list_for_each_entry(dport, &port->dports, list)
if (dport->dport == dev) {
device_unlock(&port->dev);
return dport;
}
device_unlock(&port->dev);
return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map) struct cxl_port *port, int *target_map)
{ {
...@@ -1324,7 +1311,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, ...@@ -1324,7 +1311,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
device_lock_assert(&port->dev); device_lock_assert(&port->dev);
if (list_empty(&port->dports)) if (xa_empty(&port->dports))
return -EINVAL; return -EINVAL;
write_seqlock(&cxlsd->target_lock); write_seqlock(&cxlsd->target_lock);
......
...@@ -346,7 +346,7 @@ struct cxl_port { ...@@ -346,7 +346,7 @@ struct cxl_port {
struct device *uport; struct device *uport;
struct device *host_bridge; struct device *host_bridge;
int id; int id;
struct list_head dports; struct xarray dports;
struct xarray endpoints; struct xarray endpoints;
struct cxl_dport *parent_dport; struct cxl_dport *parent_dport;
struct ida decoder_ida; struct ida decoder_ida;
...@@ -361,20 +361,24 @@ struct cxl_port { ...@@ -361,20 +361,24 @@ struct cxl_port {
bool cdat_available; bool cdat_available;
}; };
static inline struct cxl_dport *
cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
{
return xa_load(&port->dports, (unsigned long)dport_dev);
}
/** /**
* struct cxl_dport - CXL downstream port * struct cxl_dport - CXL downstream port
* @dport: PCI bridge or firmware device representing the downstream link * @dport: PCI bridge or firmware device representing the downstream link
* @port_id: unique hardware identifier for dport in decoder target list * @port_id: unique hardware identifier for dport in decoder target list
* @component_reg_phys: downstream port component registers * @component_reg_phys: downstream port component registers
* @port: reference to cxl_port that contains this downstream port * @port: reference to cxl_port that contains this downstream port
* @list: node for a cxl_port's list of cxl_dport instances
*/ */
struct cxl_dport { struct cxl_dport {
struct device *dport; struct device *dport;
int port_id; int port_id;
resource_size_t component_reg_phys; resource_size_t component_reg_phys;
struct cxl_port *port; struct cxl_port *port;
struct list_head list;
}; };
/** /**
...@@ -417,8 +421,6 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); ...@@ -417,8 +421,6 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct device *dport, int port_id, struct device *dport, int port_id,
resource_size_t component_reg_phys); resource_size_t component_reg_phys);
struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
const struct device *dev);
struct cxl_decoder *to_cxl_decoder(struct device *dev); struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment