Commit 7f569e91 authored by Li Ming, committed by Dave Jiang

cxl/port: Use scoped_guard()/guard() to drop device_lock() for cxl_port

A device_lock()/device_unlock() pair can be replaced by the cleanup helpers
scoped_guard() or guard(), which improves code readability. The CXL subsystem
still uses device_lock()/device_unlock() pairs to protect cxl port resources;
most of them can simply be replaced by a scoped_guard() or a guard().
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Li Ming <ming4.li@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://patch.msgid.link/20240830013138.2256244-2-ming4.li@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
parent dd2617eb
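
For readers new to these helpers: guard() takes the lock and holds it until the
end of the enclosing scope, while scoped_guard() holds it only across the
attached block. A minimal sketch of the conversion pattern applied throughout
this patch, where do_locked_work() is a hypothetical stand-in for whatever the
lock protects:

/* Before: manual pairing, with the unlock repeated on every exit path */
static int example_before(struct cxl_port *port)
{
        int rc;

        device_lock(&port->dev);
        rc = do_locked_work(port);
        device_unlock(&port->dev);
        return rc;
}

/* After: the unlock is emitted automatically at scope exit */
static int example_after(struct cxl_port *port)
{
        guard(device)(&port->dev);
        return do_locked_work(port);
}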
@@ -1214,7 +1214,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
 	int rc;
 
 	/* synchronize with cxl_mem_probe() and decoder write operations */
-	device_lock(&cxlmd->dev);
+	guard(device)(&cxlmd->dev);
 	endpoint = cxlmd->endpoint;
 	down_read(&cxl_region_rwsem);
 	/*
@@ -1226,7 +1226,6 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
 	else
 		rc = -EBUSY;
 	up_read(&cxl_region_rwsem);
-	device_unlock(&cxlmd->dev);
 
 	return rc;
 }
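
Both helpers come from linux/cleanup.h and are built on the compiler's cleanup
attribute, which is what makes the early returns inside the converted blocks
below safe: the unlock runs on every path out of the guarded scope. A
standalone userspace sketch of the same mechanism, using pthreads purely for
illustration (none of this code is from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Invoked by the compiler whenever 'guard' goes out of scope */
static void unlock_cleanup(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

static int guarded_op(int fail)
{
        pthread_mutex_lock(&lock);
        __attribute__((cleanup(unlock_cleanup)))
        pthread_mutex_t *guard = &lock;

        if (fail)
                return -1;      /* unlock_cleanup() still runs */
        puts("did work under lock");
        return 0;               /* ...and here as well */
}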
@@ -1258,18 +1258,13 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
 static int add_ep(struct cxl_ep *new)
 {
 	struct cxl_port *port = new->dport->port;
-	int rc;
 
-	device_lock(&port->dev);
-	if (port->dead) {
-		device_unlock(&port->dev);
+	guard(device)(&port->dev);
+	if (port->dead)
 		return -ENXIO;
-	}
-	rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
-		       GFP_KERNEL);
-	device_unlock(&port->dev);
-	return rc;
+
+	return xa_insert(&port->endpoints, (unsigned long)new->ep,
+			 new, GFP_KERNEL);
 }
 
 /**
@@ -1393,14 +1388,14 @@ static void delete_endpoint(void *data)
 	struct cxl_port *endpoint = cxlmd->endpoint;
 	struct device *host = endpoint_host(endpoint);
 
-	device_lock(host);
-	if (host->driver && !endpoint->dead) {
-		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
-		devm_release_action(host, cxl_unlink_uport, endpoint);
-		devm_release_action(host, unregister_port, endpoint);
+	scoped_guard(device, host) {
+		if (host->driver && !endpoint->dead) {
+			devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+			devm_release_action(host, cxl_unlink_uport, endpoint);
+			devm_release_action(host, unregister_port, endpoint);
+		}
+		cxlmd->endpoint = NULL;
 	}
-	cxlmd->endpoint = NULL;
-	device_unlock(host);
 	put_device(&endpoint->dev);
 	put_device(host);
 }
@@ -1565,40 +1560,38 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
 	 * dereferencing the device of the port before the parent_port releasing.
 	 */
 	struct cxl_port *port __free(put_cxl_port) = NULL;
-	device_lock(&parent_port->dev);
-	if (!parent_port->dev.driver) {
-		dev_warn(&cxlmd->dev,
-			 "port %s:%s disabled, failed to enumerate CXL.mem\n",
-			 dev_name(&parent_port->dev), dev_name(uport_dev));
-		port = ERR_PTR(-ENXIO);
-		goto out;
-	}
+	scoped_guard(device, &parent_port->dev) {
+		if (!parent_port->dev.driver) {
+			dev_warn(&cxlmd->dev,
+				 "port %s:%s disabled, failed to enumerate CXL.mem\n",
+				 dev_name(&parent_port->dev), dev_name(uport_dev));
+			return -ENXIO;
+		}
 
-	port = find_cxl_port_at(parent_port, dport_dev, &dport);
-	if (!port) {
-		component_reg_phys = find_component_registers(uport_dev);
-		port = devm_cxl_add_port(&parent_port->dev, uport_dev,
-					 component_reg_phys, parent_dport);
+		port = find_cxl_port_at(parent_port, dport_dev, &dport);
+		if (!port) {
+			component_reg_phys = find_component_registers(uport_dev);
+			port = devm_cxl_add_port(&parent_port->dev, uport_dev,
						 component_reg_phys, parent_dport);
+			if (IS_ERR(port))
+				return PTR_ERR(port);
 
-		/* retry find to pick up the new dport information */
-		if (!IS_ERR(port))
+			/* retry find to pick up the new dport information */
 			port = find_cxl_port_at(parent_port, dport_dev, &dport);
+			if (!port)
+				return -ENXIO;
+		}
 	}
-out:
-	device_unlock(&parent_port->dev);
-	if (IS_ERR(port))
-		rc = PTR_ERR(port);
-	else {
-		dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
-			dev_name(&port->dev), dev_name(port->uport_dev));
-		rc = cxl_add_ep(dport, &cxlmd->dev);
-		if (rc == -EBUSY) {
-			/*
-			 * "can't" happen, but this error code means
-			 * something to the caller, so translate it.
-			 */
-			rc = -ENXIO;
-		}
+
+	dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
+		dev_name(&port->dev), dev_name(port->uport_dev));
+	rc = cxl_add_ep(dport, &cxlmd->dev);
+	if (rc == -EBUSY) {
+		/*
+		 * "can't" happen, but this error code means
+		 * something to the caller, so translate it.
+		 */
+		rc = -ENXIO;
 	}
+
 	return rc;
@@ -1979,7 +1972,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
 int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
 {
 	struct cxl_port *port;
-	int rc;
 
 	if (WARN_ON_ONCE(!cxld))
 		return -EINVAL;
@@ -1989,11 +1981,8 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
 	port = to_cxl_port(cxld->dev.parent);
 
-	device_lock(&port->dev);
-	rc = cxl_decoder_add_locked(cxld, target_map);
-	device_unlock(&port->dev);
-
-	return rc;
+	guard(device)(&port->dev);
+	return cxl_decoder_add_locked(cxld, target_map);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
@@ -3094,11 +3094,11 @@ static void cxlr_release_nvdimm(void *_cxlr)
 	struct cxl_region *cxlr = _cxlr;
 	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;
 
-	device_lock(&cxl_nvb->dev);
-	if (cxlr->cxlr_pmem)
-		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
-				    cxlr->cxlr_pmem);
-	device_unlock(&cxl_nvb->dev);
+	scoped_guard(device, &cxl_nvb->dev) {
+		if (cxlr->cxlr_pmem)
+			devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
+					    cxlr->cxlr_pmem);
+	}
 	cxlr->cxl_nvb = NULL;
 	put_device(&cxl_nvb->dev);
 }
@@ -3134,13 +3134,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
 	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
 		dev_name(dev));
 
-	device_lock(&cxl_nvb->dev);
-	if (cxl_nvb->dev.driver)
-		rc = devm_add_action_or_reset(&cxl_nvb->dev,
-					      cxlr_pmem_unregister, cxlr_pmem);
-	else
-		rc = -ENXIO;
-	device_unlock(&cxl_nvb->dev);
+	scoped_guard(device, &cxl_nvb->dev) {
+		if (cxl_nvb->dev.driver)
+			rc = devm_add_action_or_reset(&cxl_nvb->dev,
+						      cxlr_pmem_unregister,
+						      cxlr_pmem);
+		else
+			rc = -ENXIO;
+	}
 
 	if (rc)
 		goto err_bridge;
@@ -168,19 +168,17 @@ static int cxl_mem_probe(struct device *dev)
 	cxl_setup_parent_dport(dev, dport);
 
-	device_lock(endpoint_parent);
-	if (!endpoint_parent->driver) {
-		dev_err(dev, "CXL port topology %s not enabled\n",
-			dev_name(endpoint_parent));
-		rc = -ENXIO;
-		goto unlock;
-	}
+	scoped_guard(device, endpoint_parent) {
+		if (!endpoint_parent->driver) {
+			dev_err(dev, "CXL port topology %s not enabled\n",
+				dev_name(endpoint_parent));
+			return -ENXIO;
+		}
 
-	rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
-unlock:
-	device_unlock(endpoint_parent);
-
-	if (rc)
-		return rc;
+		rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport);
+		if (rc)
+			return rc;
+	}
 
 	/*
 	 * The kernel may be operating out of CXL memory on this device,
@@ -233,15 +233,13 @@ static int detach_nvdimm(struct device *dev, void *data)
 	if (!is_cxl_nvdimm(dev))
 		return 0;
 
-	device_lock(dev);
-	if (!dev->driver)
-		goto out;
-
-	cxl_nvd = to_cxl_nvdimm(dev);
-	if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
-		release = true;
-out:
-	device_unlock(dev);
+	scoped_guard(device, dev) {
+		if (dev->driver) {
+			cxl_nvd = to_cxl_nvdimm(dev);
+			if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
+				release = true;
+		}
+	}
 	if (release)
 		device_release_driver(dev);
 	return 0;
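
The device guard class used above is not introduced by this patch; it comes
from the existing cleanup infrastructure. It is wired up via DEFINE_GUARD() in
include/linux/device.h, roughly as follows (paraphrased; check the tree for the
exact definition):

/* Maps guard(device) / scoped_guard(device, ...) onto device_lock()/device_unlock() */
DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))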