Commit f979b13c authored by Dan Williams

libnvdimm, label: honor the lba size specified in v1.2 labels

Previously we only honored the lba size for blk-aperture mode
namespaces. For pmem namespaces the lba size was just assumed to be 512.
With the new v1.2 label definition and compatibility with other
operating environments, the ->lbasize property is now respected for pmem
namespaces.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent c12c48ce
...@@ -163,6 +163,29 @@ bool pmem_should_map_pages(struct device *dev) ...@@ -163,6 +163,29 @@ bool pmem_should_map_pages(struct device *dev)
} }
EXPORT_SYMBOL(pmem_should_map_pages); EXPORT_SYMBOL(pmem_should_map_pages);
/**
 * pmem_sector_size - lookup the logical sector size for a namespace
 * @ndns: namespace device to interrogate
 *
 * For label-backed pmem namespaces honor the ->lbasize recorded in the
 * v1.2 namespace label; only 512 and 4096 byte sectors are supported.
 * Label-less namespaces (is_namespace_io()), or labels that leave
 * lbasize unset, report the default of 512.
 *
 * Returns: the sector size in bytes (512 or 4096).
 */
unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			/* %lu: ->lbasize is unsigned long */
			dev_WARN(&ndns->dev, "unsupported sector size: %lu\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name) char *name)
{ {
...@@ -1283,28 +1306,49 @@ static ssize_t resource_show(struct device *dev, ...@@ -1283,28 +1306,49 @@ static ssize_t resource_show(struct device *dev,
} }
static DEVICE_ATTR_RO(resource); static DEVICE_ATTR_RO(resource);
static const unsigned long ns_lbasize_supported[] = { 512, 520, 528, static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 }; 4096, 4104, 4160, 4224, 0 };
static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
static ssize_t sector_size_show(struct device *dev, static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
if (!is_namespace_blk(dev)) return nd_sector_size_show(nsblk->lbasize,
return -ENXIO; blk_lbasize_supported, buf);
}
return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf); if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nd_sector_size_show(nspm->lbasize,
pmem_lbasize_supported, buf);
}
return -ENXIO;
} }
static ssize_t sector_size_store(struct device *dev, static ssize_t sector_size_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len) struct device_attribute *attr, const char *buf, size_t len)
{ {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_region *nd_region = to_nd_region(dev->parent);
const unsigned long *supported;
unsigned long *lbasize;
ssize_t rc = 0; ssize_t rc = 0;
if (!is_namespace_blk(dev)) if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
lbasize = &nsblk->lbasize;
supported = blk_lbasize_supported;
} else if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
lbasize = &nspm->lbasize;
supported = pmem_lbasize_supported;
} else
return -ENXIO; return -ENXIO;
device_lock(dev); device_lock(dev);
...@@ -1312,8 +1356,7 @@ static ssize_t sector_size_store(struct device *dev, ...@@ -1312,8 +1356,7 @@ static ssize_t sector_size_store(struct device *dev,
if (to_ndns(dev)->claim) if (to_ndns(dev)->claim)
rc = -EBUSY; rc = -EBUSY;
if (rc >= 0) if (rc >= 0)
rc = nd_sector_size_store(dev, buf, &nsblk->lbasize, rc = nd_sector_size_store(dev, buf, lbasize, supported);
ns_lbasize_supported);
if (rc >= 0) if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev); rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__, dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
...@@ -1458,9 +1501,6 @@ static umode_t namespace_visible(struct kobject *kobj, ...@@ -1458,9 +1501,6 @@ static umode_t namespace_visible(struct kobject *kobj,
if (a == &dev_attr_size.attr) if (a == &dev_attr_size.attr)
return 0644; return 0644;
if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
return 0;
return a->mode; return a->mode;
} }
...@@ -1795,6 +1835,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, ...@@ -1795,6 +1835,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
NSLABEL_NAME_LEN, GFP_KERNEL); NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid, nspm->uuid = kmemdup((void __force *) label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL); NSLABEL_UUID_LEN, GFP_KERNEL);
nspm->lbasize = __le64_to_cpu(label0->lbasize);
} }
if (!nspm->alt_name || !nspm->uuid) { if (!nspm->alt_name || !nspm->uuid) {
......
...@@ -356,6 +356,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); ...@@ -356,6 +356,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt); int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name); char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region, void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res); struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM) #if IS_ENABLED(CONFIG_ND_CLAIM)
......
...@@ -342,6 +342,7 @@ static int pmem_attach_disk(struct device *dev, ...@@ -342,6 +342,7 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_write_cache(q, true, true); blk_queue_write_cache(q, true, true);
blk_queue_make_request(q, pmem_make_request); blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
......
...@@ -75,12 +75,14 @@ struct nd_namespace_io { ...@@ -75,12 +75,14 @@ struct nd_namespace_io {
/** /**
* struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
* @nsio: device and system physical address range to drive * @nsio: device and system physical address range to drive
* @lbasize: logical sector size for the namespace in block-device-mode
* @alt_name: namespace name supplied in the dimm label * @alt_name: namespace name supplied in the dimm label
* @uuid: namespace name supplied in the dimm label * @uuid: namespace name supplied in the dimm label
* @id: ida allocated id * @id: ida allocated id
*/ */
struct nd_namespace_pmem { struct nd_namespace_pmem {
struct nd_namespace_io nsio; struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name; char *alt_name;
u8 *uuid; u8 *uuid;
int id; int id;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment