Commit ee96dd96 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'libnvdimm-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "The update for this cycle includes the deprecation of block-aperture
  mode and a new perf events interface for the papr_scm nvdimm driver.

  The perf events approach was acked by PeterZ.

   - Add perf support for nvdimm events, initially only for 'papr_scm'
     devices.

   - Deprecate the 'block aperture' support in libnvdimm, it only ever
     existed in the specification, not in shipping product"

* tag 'libnvdimm-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  nvdimm/blk: Fix title level
  MAINTAINERS: remove section LIBNVDIMM BLK: MMIO-APERTURE DRIVER
  powerpc/papr_scm: Fix build failure when
  drivers/nvdimm: Fix build failure when CONFIG_PERF_EVENTS is not set
  nvdimm/region: Delete nd_blk_region infrastructure
  ACPI: NFIT: Remove block aperture support
  nvdimm/namespace: Delete nd_namespace_blk
  nvdimm/namespace: Delete blk namespace consideration in shared paths
  nvdimm/blk: Delete the block-aperture window driver
  nvdimm/region: Fix default alignment for small regions
  docs: ABI: sysfs-bus-nvdimm: Document sysfs event format entries for nvdimm pmu
  powerpc/papr_scm: Add perf interface support
  drivers/nvdimm: Add perf interface to expose nvdimm performance stats
  drivers/nvdimm: Add nvdimm pmu structure
parents d888c83f ada8d8d3
......@@ -6,3 +6,38 @@ Description:
The libnvdimm sub-system implements a common sysfs interface for
platform nvdimm resources. See Documentation/driver-api/nvdimm/.
What: /sys/bus/event_source/devices/nmemX/format
Date: February 2022
KernelVersion: 5.18
Contact: Kajol Jain <kjain@linux.ibm.com>
Description: (RO) Attribute group to describe the magic bits
that go into perf_event_attr.config for a particular pmu.
(See ABI/testing/sysfs-bus-event_source-devices-format).
Each attribute under this group defines a bit range of the
perf_event_attr.config. The supported attribute is listed
below::
event = "config:0-4" - event ID
For example::
ctl_res_cnt = "event=0x1"
What: /sys/bus/event_source/devices/nmemX/events
Date: February 2022
KernelVersion: 5.18
Contact: Kajol Jain <kjain@linux.ibm.com>
Description: (RO) Attribute group to describe performance monitoring events
for the nvdimm memory device. Each attribute in this group
describes a single performance monitoring event supported by
this nvdimm pmu. The name of the file is the name of the event.
(See ABI/testing/sysfs-bus-event_source-devices-events). A
listing of the events supported by a given nvdimm provider type
can be found in Documentation/driver-api/nvdimm/$provider.
What: /sys/bus/event_source/devices/nmemX/cpumask
Date: February 2022
KernelVersion: 5.18
Contact: Kajol Jain <kjain@linux.ibm.com>
Description: (RO) This sysfs file exposes the cpumask which is designated
to retrieve nvdimm pmu event counter data.
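For reference, the entries above are consumed through the regular perf_event_open() path. The following is a minimal userspace sketch, not part of this series, assuming an nmem0 PMU instance: the PMU type id is read from the standard event_source "type" attribute, and the 0x1 config value corresponds to the ctl_res_cnt example documented above. Error handling and cpumask selection are deliberately simplified.

/*
 * Illustrative only: open and read one nvdimm PMU counter via
 * perf_event_open(), assuming an nmem0 device exists.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = { 0 };
	unsigned int type;
	uint64_t count;
	FILE *f;
	int fd;

	/* Resolve the dynamic PMU type id for nmem0 */
	f = fopen("/sys/bus/event_source/devices/nmem0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x1;	/* ctl_res_cnt, per the "event=0x1" example above */

	/*
	 * cpu 0 is used for brevity; a real tool would pick a cpu from the
	 * .../nmem0/cpumask file documented above.
	 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("ctl_res_cnt: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}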
......@@ -11121,17 +11121,6 @@ F: drivers/ata/
F: include/linux/ata.h
F: include/linux/libata.h
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: nvdimm@lists.linux.dev
S: Supported
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
P: Documentation/nvdimm/maintainer-entry-profile.rst
F: drivers/nvdimm/blk.c
F: drivers/nvdimm/region_devs.c
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
......
......@@ -48,6 +48,11 @@ struct dev_archdata {
struct pdev_archdata {
u64 dma_mask;
/*
* Pointer to nvdimm_pmu structure, to handle the unregistering
* of pmu device
*/
void *priv;
};
#endif /* _ASM_POWERPC_DEVICE_H */
......@@ -19,6 +19,7 @@
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>
#include <linux/perf_event.h>
#define BIND_ANY_ADDR (~0ul)
......@@ -124,6 +125,8 @@ struct papr_scm_priv {
/* The bits which needs to be overridden */
u64 health_bitmap_inject_mask;
/* array to have event_code and stat_id mappings */
char **nvdimm_events_map;
};
static int papr_scm_pmem_flush(struct nd_region *nd_region,
......@@ -344,6 +347,225 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
return 0;
}
#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
int rc, size;
/* Allocate request buffer enough to hold single performance stat */
size = sizeof(struct papr_scm_perf_stats) +
sizeof(struct papr_scm_perf_stat);
if (!p || !p->nvdimm_events_map)
return -EINVAL;
stats = kzalloc(size, GFP_KERNEL);
if (!stats)
return -ENOMEM;
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id,
p->nvdimm_events_map[event->attr.config],
sizeof(stat->stat_id));
stat->stat_val = 0;
rc = drc_pmem_query_stats(p, stats, 1);
if (rc < 0) {
kfree(stats);
return rc;
}
*count = be64_to_cpu(stat->stat_val);
kfree(stats);
return 0;
}
static int papr_scm_pmu_event_init(struct perf_event *event)
{
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
struct papr_scm_priv *p;
if (!nd_pmu)
return -EINVAL;
/* test the event attr type for PMU enumeration */
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* it does not support event sampling mode */
if (is_sampling_event(event))
return -EOPNOTSUPP;
/* no branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
if (!p)
return -EINVAL;
/* Invalid eventcode */
if (event->attr.config == 0 || event->attr.config > 16)
return -EINVAL;
return 0;
}
static int papr_scm_pmu_add(struct perf_event *event, int flags)
{
u64 count;
int rc;
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
if (!nd_pmu)
return -EINVAL;
if (flags & PERF_EF_START) {
rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
if (rc)
return rc;
local64_set(&event->hw.prev_count, count);
}
return 0;
}
static void papr_scm_pmu_read(struct perf_event *event)
{
u64 prev, now;
int rc;
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
if (!nd_pmu)
return;
rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
if (rc)
return;
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
static void papr_scm_pmu_del(struct perf_event *event, int flags)
{
papr_scm_pmu_read(event);
}
static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
char *statid;
int index, rc, count;
u32 available_events;
if (!p->stat_buffer_len)
return -ENOENT;
available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
/ sizeof(struct papr_scm_perf_stat);
/* Allocate the buffer for phyp where stats are written */
stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
if (!stats) {
rc = -ENOMEM;
return rc;
}
/* Allocate memory to nvdimm_event_map */
p->nvdimm_events_map = kcalloc(available_events, sizeof(char *), GFP_KERNEL);
if (!p->nvdimm_events_map) {
rc = -ENOMEM;
goto out_stats;
}
/* Called to get list of events supported */
rc = drc_pmem_query_stats(p, stats, 0);
if (rc)
goto out_nvdimm_events_map;
for (index = 0, stat = stats->scm_statistic, count = 0;
index < available_events; index++, ++stat) {
statid = kzalloc(strlen(stat->stat_id) + 1, GFP_KERNEL);
if (!statid) {
rc = -ENOMEM;
goto out_nvdimm_events_map;
}
strcpy(statid, stat->stat_id);
p->nvdimm_events_map[count] = statid;
count++;
}
p->nvdimm_events_map[count] = NULL;
kfree(stats);
return 0;
out_nvdimm_events_map:
kfree(p->nvdimm_events_map);
out_stats:
kfree(stats);
return rc;
}
static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
struct nvdimm_pmu *nd_pmu;
int rc, nodeid;
nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
if (!nd_pmu) {
rc = -ENOMEM;
goto pmu_err_print;
}
rc = papr_scm_pmu_check_events(p, nd_pmu);
if (rc)
goto pmu_check_events_err;
nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
nd_pmu->pmu.read = papr_scm_pmu_read;
nd_pmu->pmu.add = papr_scm_pmu_add;
nd_pmu->pmu.del = papr_scm_pmu_del;
nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
PERF_PMU_CAP_NO_EXCLUDE;
/* updating the cpumask variable */
nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);
rc = register_nvdimm_pmu(nd_pmu, p->pdev);
if (rc)
goto pmu_register_err;
/*
* Set archdata.priv value to nvdimm_pmu structure, to handle the
* unregistering of pmu device.
*/
p->pdev->archdata.priv = nd_pmu;
return;
pmu_register_err:
kfree(p->nvdimm_events_map);
pmu_check_events_err:
kfree(nd_pmu);
pmu_err_print:
dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
}
#else
static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
#endif
/*
* Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
* health information.
......@@ -1320,6 +1542,7 @@ static int papr_scm_probe(struct platform_device *pdev)
goto err2;
platform_set_drvdata(pdev, p);
papr_scm_pmu_register(p);
return 0;
......@@ -1338,6 +1561,12 @@ static int papr_scm_remove(struct platform_device *pdev)
nvdimm_bus_unregister(p->bus);
drc_pmem_unbind(p);
if (pdev->archdata.priv)
unregister_nvdimm_pmu(pdev->archdata.priv);
pdev->archdata.priv = NULL;
kfree(p->nvdimm_events_map);
kfree(p->bus_desc.provider_name);
kfree(p);
......
......@@ -208,13 +208,9 @@ struct nfit_mem {
struct nvdimm *nvdimm;
struct acpi_nfit_memory_map *memdev_dcr;
struct acpi_nfit_memory_map *memdev_pmem;
struct acpi_nfit_memory_map *memdev_bdw;
struct acpi_nfit_control_region *dcr;
struct acpi_nfit_data_region *bdw;
struct acpi_nfit_system_address *spa_dcr;
struct acpi_nfit_system_address *spa_bdw;
struct acpi_nfit_interleave *idt_dcr;
struct acpi_nfit_interleave *idt_bdw;
struct kernfs_node *flags_attr;
struct nfit_flush *nfit_flush;
struct list_head list;
......@@ -266,8 +262,6 @@ struct acpi_nfit_desc {
unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
unsigned int platform_cap;
unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
enum nvdimm_fwa_state fwa_state;
enum nvdimm_fwa_capability fwa_cap;
int fwa_count;
......
......@@ -10,12 +10,9 @@ menuconfig LIBNVDIMM
ACPI-6-NFIT defined resources. On platforms that define an
NFIT, or otherwise can discover NVDIMM resources, a libnvdimm
bus is registered to advertise PMEM (persistent memory)
namespaces (/dev/pmemX) and BLK (sliding mmio window(s))
namespaces (/dev/ndblkX.Y). A PMEM namespace refers to a
namespaces (/dev/pmemX). A PMEM namespace refers to a
memory resource that may span multiple DIMMs and support DAX
(see CONFIG_DAX). A BLK namespace refers to an NVDIMM control
region which exposes an mmio register set for windowed access
mode to non-volatile memory.
(see CONFIG_DAX).
if LIBNVDIMM
......@@ -38,19 +35,6 @@ config BLK_DEV_PMEM
Say Y if you want to use an NVDIMM
config ND_BLK
tristate "BLK: Block data window (aperture) device support"
default LIBNVDIMM
select ND_BTT if BTT
help
Support NVDIMMs, or other devices, that implement a BLK-mode
access capability. BLK-mode access uses memory-mapped-i/o
apertures to access persistent media.
Say Y if your platform firmware emits an ACPI.NFIT table
(CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode
capabilities.
config ND_CLAIM
bool
......@@ -67,9 +51,8 @@ config BTT
applications that rely on sector writes not being torn (a
guarantee that typical disks provide) can continue to do so.
The BTT manifests itself as an alternate personality for an
NVDIMM namespace, i.e. a namespace can be in raw mode (pmemX,
ndblkX.Y, etc...), or 'sectored' mode, (pmemXs, ndblkX.Ys,
etc...).
NVDIMM namespace, i.e. a namespace can be in raw mode pmemX,
or 'sectored' mode.
Select Y if unsure
......
......@@ -2,7 +2,6 @@
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
......@@ -11,13 +10,12 @@ nd_pmem-y := pmem.o
nd_btt-y := btt.o
nd_blk-y := blk.o
nd_e820-y := e820.o
libnvdimm-y := core.o
libnvdimm-y += bus.o
libnvdimm-y += dimm_devs.o
libnvdimm-$(CONFIG_PERF_EVENTS) += nd_perf.o
libnvdimm-y += dimm.o
libnvdimm-y += region_devs.o
libnvdimm-y += region.o
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* NVDIMM Block Window Driver
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"
static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
{
return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
}
static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
{
return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
}
static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
{
return nsblk->lbasize - nsblk_meta_size(nsblk);
}
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
resource_size_t ns_offset, unsigned int len)
{
int i;
for (i = 0; i < nsblk->num_resources; i++) {
if (ns_offset < resource_size(nsblk->res[i])) {
if (ns_offset + len > resource_size(nsblk->res[i])) {
dev_WARN_ONCE(&nsblk->common.dev, 1,
"illegal request\n");
return SIZE_MAX;
}
return nsblk->res[i]->start + ns_offset;
}
ns_offset -= resource_size(nsblk->res[i]);
}
dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
return SIZE_MAX;
}
static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
{
struct nd_region *nd_region;
struct device *parent;
parent = nsblk->common.dev.parent;
nd_region = container_of(parent, struct nd_region, dev);
return container_of(nd_region, struct nd_blk_region, nd_region);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
struct bio_integrity_payload *bip, u64 lba, int rw)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
unsigned int len = nsblk_meta_size(nsblk);
resource_size_t dev_offset, ns_offset;
u32 internal_lbasize, sector_size;
int err = 0;
internal_lbasize = nsblk_internal_lbasize(nsblk);
sector_size = nsblk_sector_size(nsblk);
ns_offset = lba * internal_lbasize + sector_size;
dev_offset = to_dev_offset(nsblk, ns_offset, len);
if (dev_offset == SIZE_MAX)
return -EIO;
while (len) {
unsigned int cur_len;
struct bio_vec bv;
void *iobuf;
bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
/*
* The 'bv' obtained from bvec_iter_bvec has its .bv_len and
* .bv_offset already adjusted for iter->bi_bvec_done, and we
* can use those directly
*/
cur_len = min(len, bv.bv_len);
iobuf = bvec_kmap_local(&bv);
err = ndbr->do_io(ndbr, dev_offset, iobuf, cur_len, rw);
kunmap_local(iobuf);
if (err)
return err;
len -= cur_len;
dev_offset += cur_len;
if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
return -EIO;
}
return err;
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
struct bio_integrity_payload *bip, u64 lba, int rw)
{
return 0;
}
#endif
static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
struct bio_integrity_payload *bip, struct page *page,
unsigned int len, unsigned int off, int rw, sector_t sector)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset, ns_offset;
u32 internal_lbasize, sector_size;
int err = 0;
void *iobuf;
u64 lba;
internal_lbasize = nsblk_internal_lbasize(nsblk);
sector_size = nsblk_sector_size(nsblk);
while (len) {
unsigned int cur_len;
/*
* If we don't have an integrity payload, we don't have to
* split the bvec into sectors, as this would cause unnecessary
* Block Window setup/move steps. the do_io routine is capable
* of handling len <= PAGE_SIZE.
*/
cur_len = bip ? min(len, sector_size) : len;
lba = div_u64(sector << SECTOR_SHIFT, sector_size);
ns_offset = lba * internal_lbasize;
dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
if (dev_offset == SIZE_MAX)
return -EIO;
iobuf = kmap_atomic(page);
err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
kunmap_atomic(iobuf);
if (err)
return err;
if (bip) {
err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
if (err)
return err;
}
len -= cur_len;
off += cur_len;
sector += sector_size >> SECTOR_SHIFT;
}
return err;
}
static void nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
int err = 0, rw;
bool do_acct;
if (!bio_integrity_prep(bio))
return;
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
start = bio_start_io_acct(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
BUG_ON(len > PAGE_SIZE);
err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
bvec.bv_offset, rw, iter.bi_sector);
if (err) {
dev_dbg(&nsblk->common.dev,
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
}
if (do_acct)
bio_end_io_acct(bio, start);
bio_endio(bio);
}
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *iobuf, size_t n, int rw,
unsigned long flags)
{
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset;
dev_offset = to_dev_offset(nsblk, offset, n);
if (unlikely(offset + n > nsblk->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
return -EFAULT;
}
if (dev_offset == SIZE_MAX)
return -EIO;
return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
}
static const struct block_device_operations nd_blk_fops = {
.owner = THIS_MODULE,
.submit_bio = nd_blk_submit_bio,
};
static void nd_blk_release_disk(void *disk)
{
del_gendisk(disk);
blk_cleanup_disk(disk);
}
static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
struct device *dev = &nsblk->common.dev;
resource_size_t available_disk_size;
struct gendisk *disk;
u64 internal_nlba;
int rc;
internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
return -ENOMEM;
disk->fops = &nd_blk_fops;
disk->private_data = nsblk;
nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
blk_queue_max_hw_sectors(disk->queue, UINT_MAX);
blk_queue_logical_block_size(disk->queue, nsblk_sector_size(nsblk));
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
if (nsblk_meta_size(nsblk)) {
rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
if (rc)
goto out_before_devm_err;
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
rc = device_add_disk(dev, disk, NULL);
if (rc)
goto out_before_devm_err;
/* nd_blk_release_disk() is called if this fails */
if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
return -ENOMEM;
nvdimm_check_and_set_ro(disk);
return 0;
out_before_devm_err:
blk_cleanup_disk(disk);
return rc;
}
static int nd_blk_probe(struct device *dev)
{
struct nd_namespace_common *ndns;
struct nd_namespace_blk *nsblk;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
nsblk = to_nd_namespace_blk(&ndns->dev);
nsblk->size = nvdimm_namespace_capacity(ndns);
dev_set_drvdata(dev, nsblk);
ndns->rw_bytes = nsblk_rw_bytes;
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
else if (nd_btt_probe(dev, ndns) == 0) {
/* we'll come back as btt-blk */
return -ENXIO;
} else
return nsblk_attach_disk(nsblk);
}
static void nd_blk_remove(struct device *dev)
{
if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev));
}
static struct nd_device_driver nd_blk_driver = {
.probe = nd_blk_probe,
.remove = nd_blk_remove,
.drv = {
.name = "nd_blk",
},
.type = ND_DRIVER_NAMESPACE_BLK,
};
static int __init nd_blk_init(void)
{
return nd_driver_register(&nd_blk_driver);
}
static void __exit nd_blk_exit(void)
{
driver_unregister(&nd_blk_driver.drv);
}
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
module_init(nd_blk_init);
module_exit(nd_blk_exit);
......@@ -34,8 +34,6 @@ static int to_nd_device_type(struct device *dev)
return ND_DEVICE_DIMM;
else if (is_memory(dev))
return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK;
else if (is_nd_dax(dev))
return ND_DEVICE_DAX_PMEM;
else if (is_nd_region(dev->parent))
......
......@@ -18,10 +18,6 @@
static DEFINE_IDA(dimm_ida);
static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
/*
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
......@@ -211,22 +207,6 @@ struct nvdimm *to_nvdimm(struct device *dev)
}
EXPORT_SYMBOL_GPL(to_nvdimm);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
struct nd_region *nd_region = &ndbr->nd_region;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
/* pmem mapping properties are private to libnvdimm */
return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
struct nvdimm *nvdimm = nd_mapping->nvdimm;
......@@ -312,8 +292,7 @@ static ssize_t flags_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
return sprintf(buf, "%s%s%s\n",
test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
return sprintf(buf, "%s%s\n",
test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
......@@ -612,8 +591,6 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
if (noblk)
flags |= 1 << NDD_NOBLK;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
......@@ -726,133 +703,6 @@ static unsigned long dpa_align(struct nd_region *nd_region)
return nd_region->align / nd_region->ndr_mappings;
}
int alias_dpa_busy(struct device *dev, void *data)
{
resource_size_t map_end, blk_start, new;
struct blk_alloc_info *info = data;
struct nd_mapping *nd_mapping;
struct nd_region *nd_region;
struct nvdimm_drvdata *ndd;
struct resource *res;
unsigned long align;
int i;
if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_mapping = &nd_region->mapping[i];
if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
break;
}
if (i >= nd_region->ndr_mappings)
return 0;
ndd = to_ndd(nd_mapping);
map_end = nd_mapping->start + nd_mapping->size - 1;
blk_start = nd_mapping->start;
/*
* In the allocation case ->res is set to free space that we are
* looking to validate against PMEM aliasing collision rules
* (i.e. BLK is allocated after all aliased PMEM).
*/
if (info->res) {
if (info->res->start >= nd_mapping->start
&& info->res->start < map_end)
/* pass */;
else
return 0;
}
retry:
/*
* Find the free dpa from the end of the last pmem allocation to
* the end of the interleave-set mapping.
*/
align = dpa_align(nd_region);
if (!align)
return 0;
for_each_dpa_resource(ndd, res) {
resource_size_t start, end;
if (strncmp(res->name, "pmem", 4) != 0)
continue;
start = ALIGN_DOWN(res->start, align);
end = ALIGN(res->end + 1, align) - 1;
if ((start >= blk_start && start < map_end)
|| (end >= blk_start && end <= map_end)) {
new = max(blk_start, min(map_end, end) + 1);
if (new != blk_start) {
blk_start = new;
goto retry;
}
}
}
/* update the free space range with the probed blk_start */
if (info->res && blk_start > info->res->start) {
info->res->start = max(info->res->start, blk_start);
if (info->res->start > info->res->end)
info->res->end = info->res->start - 1;
return 1;
}
info->available -= blk_start - nd_mapping->start;
return 0;
}
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
*
* Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
* we arrange for them to never start at an lower dpa than the last
* PMEM allocation in an aliased region.
*/
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct blk_alloc_info info = {
.nd_mapping = nd_mapping,
.available = nd_mapping->size,
.res = NULL,
};
struct resource *res;
unsigned long align;
if (!ndd)
return 0;
device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
/* now account for busy blk allocations in unaliased dpa */
align = dpa_align(nd_region);
if (!align)
return 0;
for_each_dpa_resource(ndd, res) {
resource_size_t start, end, size;
if (strncmp(res->name, "blk", 3) != 0)
continue;
start = ALIGN_DOWN(res->start, align);
end = ALIGN(res->end + 1, align) - 1;
size = end - start + 1;
if (size >= info.available)
return 0;
info.available -= size;
}
return info.available;
}
/**
* nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
* contiguous unallocated dpa range.
......@@ -900,24 +750,16 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
* nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
* @nd_mapping: container of dpa-resource-root + labels
* @nd_region: constrain available space check to this reference region
* @overlap: calculate available space assuming this level of overlap
*
* Validate that a PMEM label, if present, aligns with the start of an
* interleave set and truncate the available size at the lowest BLK
* overlap point.
*
* The expectation is that this routine is called multiple times as it
* probes for the largest BLK encroachment for any single member DIMM of
* the interleave set. Once that value is determined the PMEM-limit for
* the set can be established.
* interleave set.
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, resource_size_t *overlap)
struct nd_mapping *nd_mapping)
{
resource_size_t map_start, map_end, busy = 0, available, blk_start;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
resource_size_t map_start, map_end, busy = 0;
struct resource *res;
const char *reason;
unsigned long align;
if (!ndd)
......@@ -929,46 +771,28 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
blk_start = max(map_start, map_end + 1 - *overlap);
for_each_dpa_resource(ndd, res) {
resource_size_t start, end;
start = ALIGN_DOWN(res->start, align);
end = ALIGN(res->end + 1, align) - 1;
if (start >= map_start && start < map_end) {
if (strncmp(res->name, "blk", 3) == 0)
blk_start = min(blk_start,
max(map_start, start));
else if (end > map_end) {
reason = "misaligned to iset";
goto err;
} else
busy += end - start + 1;
if (end > map_end) {
nd_dbg_dpa(nd_region, ndd, res,
"misaligned to iset\n");
return 0;
}
busy += end - start + 1;
} else if (end >= map_start && end <= map_end) {
if (strncmp(res->name, "blk", 3) == 0) {
/*
* If a BLK allocation overlaps the start of
* PMEM the entire interleave set may now only
* be used for BLK.
*/
blk_start = map_start;
} else
busy += end - start + 1;
busy += end - start + 1;
} else if (map_start > start && map_start < end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
blk_start = map_start;
}
}
*overlap = map_end + 1 - blk_start;
available = blk_start - map_start;
if (busy < available)
return ALIGN_DOWN(available - busy, align);
return 0;
err:
nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
if (busy < nd_mapping->size)
return ALIGN_DOWN(nd_mapping->size - busy, align);
return 0;
}
......@@ -999,7 +823,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
/**
* nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
* @nvdimm: container of dpa-resource-root + labels
* @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
* @label_id: dpa resource name of the form pmem-<human readable uuid>
*/
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id)
......
......@@ -193,7 +193,7 @@ struct nd_namespace_label {
/**
* struct nd_label_id - identifier string for dpa allocation
* @id: "{blk|pmem}-<namespace uuid>"
* @id: "pmem-<namespace uuid>"
*/
struct nd_label_id {
char id[ND_LABEL_ID_SIZE];
......@@ -221,9 +221,6 @@ bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size);
int nd_blk_namespace_label_update(struct nd_region *nd_region,
struct nd_namespace_blk *nsblk, resource_size_t size);
#endif /* __LABEL_H__ */
......@@ -82,30 +82,12 @@ static inline void nvdimm_security_overwrite_query(struct work_struct *work)
}
#endif
/**
* struct blk_alloc_info - tracking info for BLK dpa scanning
* @nd_mapping: blk region mapping boundaries
* @available: decremented in alias_dpa_busy as aliased PMEM is scanned
* @busy: decremented in blk_dpa_busy to account for ranges already
* handled by alias_dpa_busy
* @res: alias_dpa_busy interprets this a free space range that needs to
* be truncated to the valid BLK allocation starting DPA, blk_dpa_busy
* treats it as a busy range that needs the aliased PMEM ranges
* truncated.
*/
struct blk_alloc_info {
struct nd_mapping *nd_mapping;
resource_size_t available, busy;
struct resource *res;
};
bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
......@@ -142,17 +124,12 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
struct nd_mapping *nd_mapping);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
......
......@@ -295,9 +295,6 @@ static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
return nd_label->efi.uuid;
}
bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label,
u64 isetcookie);
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label, guid_t *guid);
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
......@@ -437,14 +434,6 @@ static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
}
struct nd_blk_region {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
void *blk_provider_data;
struct nd_region nd_region;
};
/*
* Lookup next in the repeating sequence of 01, 10, and 11.
*/
......@@ -672,7 +661,6 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
return -ENXIO;
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
......@@ -687,7 +675,6 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
return false;
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const uuid_t *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nd_perf.c: NVDIMM Device Performance Monitoring Unit support
*
* Perf interface to expose nvdimm performance stats.
*
* Copyright (C) 2021 IBM Corporation
*/
#define pr_fmt(fmt) "nvdimm_pmu: " fmt
#include <linux/nd.h>
#include <linux/platform_device.h>
#define EVENT(_name, _code) enum{_name = _code}
/*
* NVDIMM Events codes.
*/
/* Controller Reset Count */
EVENT(CTL_RES_CNT, 0x1);
/* Controller Reset Elapsed Time */
EVENT(CTL_RES_TM, 0x2);
/* Power-on Seconds */
EVENT(POWERON_SECS, 0x3);
/* Life Remaining */
EVENT(MEM_LIFE, 0x4);
/* Critical Resource Utilization */
EVENT(CRI_RES_UTIL, 0x5);
/* Host Load Count */
EVENT(HOST_L_CNT, 0x6);
/* Host Store Count */
EVENT(HOST_S_CNT, 0x7);
/* Host Store Duration */
EVENT(HOST_S_DUR, 0x8);
/* Host Load Duration */
EVENT(HOST_L_DUR, 0x9);
/* Media Read Count */
EVENT(MED_R_CNT, 0xa);
/* Media Write Count */
EVENT(MED_W_CNT, 0xb);
/* Media Read Duration */
EVENT(MED_R_DUR, 0xc);
/* Media Write Duration */
EVENT(MED_W_DUR, 0xd);
/* Cache Read Hit Count */
EVENT(CACHE_RH_CNT, 0xe);
/* Cache Write Hit Count */
EVENT(CACHE_WH_CNT, 0xf);
/* Fast Write Count */
EVENT(FAST_W_CNT, 0x10);
NVDIMM_EVENT_ATTR(ctl_res_cnt, CTL_RES_CNT);
NVDIMM_EVENT_ATTR(ctl_res_tm, CTL_RES_TM);
NVDIMM_EVENT_ATTR(poweron_secs, POWERON_SECS);
NVDIMM_EVENT_ATTR(mem_life, MEM_LIFE);
NVDIMM_EVENT_ATTR(cri_res_util, CRI_RES_UTIL);
NVDIMM_EVENT_ATTR(host_l_cnt, HOST_L_CNT);
NVDIMM_EVENT_ATTR(host_s_cnt, HOST_S_CNT);
NVDIMM_EVENT_ATTR(host_s_dur, HOST_S_DUR);
NVDIMM_EVENT_ATTR(host_l_dur, HOST_L_DUR);
NVDIMM_EVENT_ATTR(med_r_cnt, MED_R_CNT);
NVDIMM_EVENT_ATTR(med_w_cnt, MED_W_CNT);
NVDIMM_EVENT_ATTR(med_r_dur, MED_R_DUR);
NVDIMM_EVENT_ATTR(med_w_dur, MED_W_DUR);
NVDIMM_EVENT_ATTR(cache_rh_cnt, CACHE_RH_CNT);
NVDIMM_EVENT_ATTR(cache_wh_cnt, CACHE_WH_CNT);
NVDIMM_EVENT_ATTR(fast_w_cnt, FAST_W_CNT);
static struct attribute *nvdimm_events_attr[] = {
NVDIMM_EVENT_PTR(CTL_RES_CNT),
NVDIMM_EVENT_PTR(CTL_RES_TM),
NVDIMM_EVENT_PTR(POWERON_SECS),
NVDIMM_EVENT_PTR(MEM_LIFE),
NVDIMM_EVENT_PTR(CRI_RES_UTIL),
NVDIMM_EVENT_PTR(HOST_L_CNT),
NVDIMM_EVENT_PTR(HOST_S_CNT),
NVDIMM_EVENT_PTR(HOST_S_DUR),
NVDIMM_EVENT_PTR(HOST_L_DUR),
NVDIMM_EVENT_PTR(MED_R_CNT),
NVDIMM_EVENT_PTR(MED_W_CNT),
NVDIMM_EVENT_PTR(MED_R_DUR),
NVDIMM_EVENT_PTR(MED_W_DUR),
NVDIMM_EVENT_PTR(CACHE_RH_CNT),
NVDIMM_EVENT_PTR(CACHE_WH_CNT),
NVDIMM_EVENT_PTR(FAST_W_CNT),
NULL
};
static struct attribute_group nvdimm_pmu_events_group = {
.name = "events",
.attrs = nvdimm_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-4");
static struct attribute *nvdimm_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group nvdimm_pmu_format_group = {
.name = "format",
.attrs = nvdimm_pmu_format_attr,
};
ssize_t nvdimm_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
static ssize_t nvdimm_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pmu *pmu = dev_get_drvdata(dev);
struct nvdimm_pmu *nd_pmu;
nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);
return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
}
static int nvdimm_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
struct nvdimm_pmu *nd_pmu;
u32 target;
int nodeid;
const struct cpumask *cpumask;
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
/* Clear it, in case given cpu is set in nd_pmu->arch_cpumask */
cpumask_test_and_clear_cpu(cpu, &nd_pmu->arch_cpumask);
/*
* If given cpu is not same as current designated cpu for
* counter access, just return.
*/
if (cpu != nd_pmu->cpu)
return 0;
/* Check for any active cpu in nd_pmu->arch_cpumask */
target = cpumask_any(&nd_pmu->arch_cpumask);
/*
* In case we don't have any active cpu in nd_pmu->arch_cpumask,
* check in given cpu's numa node list.
*/
if (target >= nr_cpu_ids) {
nodeid = cpu_to_node(cpu);
cpumask = cpumask_of_node(nodeid);
target = cpumask_any_but(cpumask, cpu);
}
nd_pmu->cpu = target;
/* Migrate nvdimm pmu events to the new target cpu if valid */
if (target >= 0 && target < nr_cpu_ids)
perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);
return 0;
}
static int nvdimm_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct nvdimm_pmu *nd_pmu;
nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
if (nd_pmu->cpu >= nr_cpu_ids)
nd_pmu->cpu = cpu;
return 0;
}
static int create_cpumask_attr_group(struct nvdimm_pmu *nd_pmu)
{
struct perf_pmu_events_attr *pmu_events_attr;
struct attribute **attrs_group;
struct attribute_group *nvdimm_pmu_cpumask_group;
pmu_events_attr = kzalloc(sizeof(*pmu_events_attr), GFP_KERNEL);
if (!pmu_events_attr)
return -ENOMEM;
attrs_group = kzalloc(2 * sizeof(struct attribute *), GFP_KERNEL);
if (!attrs_group) {
kfree(pmu_events_attr);
return -ENOMEM;
}
/* Allocate memory for cpumask attribute group */
nvdimm_pmu_cpumask_group = kzalloc(sizeof(*nvdimm_pmu_cpumask_group), GFP_KERNEL);
if (!nvdimm_pmu_cpumask_group) {
kfree(pmu_events_attr);
kfree(attrs_group);
return -ENOMEM;
}
sysfs_attr_init(&pmu_events_attr->attr.attr);
pmu_events_attr->attr.attr.name = "cpumask";
pmu_events_attr->attr.attr.mode = 0444;
pmu_events_attr->attr.show = nvdimm_pmu_cpumask_show;
attrs_group[0] = &pmu_events_attr->attr.attr;
attrs_group[1] = NULL;
nvdimm_pmu_cpumask_group->attrs = attrs_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group;
return 0;
}
static int nvdimm_pmu_cpu_hotplug_init(struct nvdimm_pmu *nd_pmu)
{
int nodeid, rc;
const struct cpumask *cpumask;
/*
* In case the cpu hotplug feature is enabled, arch specific code
* can provide the required cpumask which can be used
* to get the designated cpu for counter access.
* Check for any active cpu in nd_pmu->arch_cpumask.
*/
if (!cpumask_empty(&nd_pmu->arch_cpumask)) {
nd_pmu->cpu = cpumask_any(&nd_pmu->arch_cpumask);
} else {
/* pick active cpu from the cpumask of device numa node. */
nodeid = dev_to_node(nd_pmu->dev);
cpumask = cpumask_of_node(nodeid);
nd_pmu->cpu = cpumask_any(cpumask);
}
rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/nvdimm:online",
nvdimm_pmu_cpu_online, nvdimm_pmu_cpu_offline);
if (rc < 0)
return rc;
nd_pmu->cpuhp_state = rc;
/* Register the pmu instance for cpu hotplug */
rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
if (rc) {
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
return rc;
}
/* Create cpumask attribute group */
rc = create_cpumask_attr_group(nd_pmu);
if (rc) {
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
return rc;
}
return 0;
}
static void nvdimm_pmu_free_hotplug_memory(struct nvdimm_pmu *nd_pmu)
{
cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR])
kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs);
kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]);
}
int register_nvdimm_pmu(struct nvdimm_pmu *nd_pmu, struct platform_device *pdev)
{
int rc;
if (!nd_pmu || !pdev)
return -EINVAL;
/* event functions like add/del/read/event_init and pmu name should not be NULL */
if (WARN_ON_ONCE(!(nd_pmu->pmu.event_init && nd_pmu->pmu.add &&
nd_pmu->pmu.del && nd_pmu->pmu.read && nd_pmu->pmu.name)))
return -EINVAL;
nd_pmu->pmu.attr_groups = kzalloc((NVDIMM_PMU_NULL_ATTR + 1) *
sizeof(struct attribute_group *), GFP_KERNEL);
if (!nd_pmu->pmu.attr_groups)
return -ENOMEM;
/*
* Add platform_device->dev pointer to nvdimm_pmu to access
* device data in events functions.
*/
nd_pmu->dev = &pdev->dev;
/* Fill attribute groups for the nvdimm pmu device */
nd_pmu->pmu.attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_EVENT_ATTR] = &nvdimm_pmu_events_group;
nd_pmu->pmu.attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL;
/* Fill attribute group for cpumask */
rc = nvdimm_pmu_cpu_hotplug_init(nd_pmu);
if (rc) {
pr_info("cpu hotplug feature failed for device: %s\n", nd_pmu->pmu.name);
kfree(nd_pmu->pmu.attr_groups);
return rc;
}
rc = perf_pmu_register(&nd_pmu->pmu, nd_pmu->pmu.name, -1);
if (rc) {
kfree(nd_pmu->pmu.attr_groups);
nvdimm_pmu_free_hotplug_memory(nd_pmu);
return rc;
}
pr_info("%s NVDIMM performance monitor support registered\n",
nd_pmu->pmu.name);
return 0;
}
EXPORT_SYMBOL_GPL(register_nvdimm_pmu);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu)
{
perf_pmu_unregister(&nd_pmu->pmu);
nvdimm_pmu_free_hotplug_memory(nd_pmu);
kfree(nd_pmu);
}
EXPORT_SYMBOL_GPL(unregister_nvdimm_pmu);
......@@ -15,6 +15,10 @@ static int nd_region_probe(struct device *dev)
static unsigned long once;
struct nd_region_data *ndrd;
struct nd_region *nd_region = to_nd_region(dev);
struct range range = {
.start = nd_region->ndr_start,
.end = nd_region->ndr_start + nd_region->ndr_size - 1,
};
if (nd_region->num_lanes > num_online_cpus()
&& nd_region->num_lanes < num_possible_cpus()
......@@ -30,25 +34,13 @@ static int nd_region_probe(struct device *dev)
if (rc)
return rc;
rc = nd_blk_region_init(nd_region);
if (rc)
return rc;
if (is_memory(&nd_region->dev)) {
struct range range = {
.start = nd_region->ndr_start,
.end = nd_region->ndr_start + nd_region->ndr_size - 1,
};
if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV;
nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
"badblocks");
if (!nd_region->bb_state)
dev_warn(&nd_region->dev,
"'badblocks' notification disabled\n");
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
}
if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV;
nd_region->bb_state =
sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
if (!nd_region->bb_state)
dev_warn(dev, "'badblocks' notification disabled\n");
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
rc = nd_region_register_namespaces(nd_region, &err);
if (rc < 0)
......@@ -158,4 +150,3 @@ void nd_region_exit(void)
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
......@@ -134,10 +134,7 @@ static void nd_region_release(struct device *dev)
}
free_percpu(nd_region->lane);
memregion_free(nd_region->id);
if (is_nd_blk(dev))
kfree(to_nd_blk_region(dev));
else
kfree(nd_region);
kfree(nd_region);
}
struct nd_region *to_nd_region(struct device *dev)
......@@ -157,33 +154,12 @@ struct device *nd_region_dev(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(nd_region_dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev);
WARN_ON(!is_nd_blk(dev));
return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);
void *nd_region_provider_data(struct nd_region *nd_region)
{
return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
* nd_region_to_nstype() - region to an integer namespace type
* @nd_region: region-device to interrogate
......@@ -208,8 +184,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
return ND_DEVICE_NAMESPACE_PMEM;
else
return ND_DEVICE_NAMESPACE_IO;
} else if (is_nd_blk(&nd_region->dev)) {
return ND_DEVICE_NAMESPACE_BLK;
}
return 0;
......@@ -332,14 +306,12 @@ static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
resource_size_t blk_max_overlap = 0, available, overlap;
resource_size_t available;
int i;
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
retry:
available = 0;
overlap = blk_max_overlap;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
......@@ -348,15 +320,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
if (!ndd)
return 0;
if (is_memory(&nd_region->dev)) {
available += nd_pmem_available_dpa(nd_region,
nd_mapping, &overlap);
if (overlap > blk_max_overlap) {
blk_max_overlap = overlap;
goto retry;
}
} else if (is_nd_blk(&nd_region->dev))
available += nd_blk_available_dpa(nd_region);
available += nd_pmem_available_dpa(nd_region, nd_mapping);
}
return available;
......@@ -364,26 +328,17 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
resource_size_t available = 0;
resource_size_t avail = 0;
int i;
if (is_memory(&nd_region->dev))
available = PHYS_ADDR_MAX;
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
if (is_memory(&nd_region->dev))
available = min(available,
nd_pmem_max_contiguous_dpa(nd_region,
nd_mapping));
else if (is_nd_blk(&nd_region->dev))
available += nd_blk_available_dpa(nd_region);
avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa(
nd_region, nd_mapping));
}
if (is_memory(&nd_region->dev))
return available * nd_region->ndr_mappings;
return available;
return avail * nd_region->ndr_mappings;
}
static ssize_t available_size_show(struct device *dev,
......@@ -693,9 +648,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
&& a != &dev_attr_available_size.attr)
return a->mode;
if ((type == ND_DEVICE_NAMESPACE_PMEM
|| type == ND_DEVICE_NAMESPACE_BLK)
&& a == &dev_attr_available_size.attr)
if (type == ND_DEVICE_NAMESPACE_PMEM &&
a == &dev_attr_available_size.attr)
return a->mode;
else if (is_memory(dev) && nd_set)
return a->mode;
......@@ -828,12 +782,6 @@ static const struct attribute_group *nd_region_attribute_groups[] = {
NULL,
};
static const struct device_type nd_blk_device_type = {
.name = "nd_blk",
.release = nd_region_release,
.groups = nd_region_attribute_groups,
};
static const struct device_type nd_pmem_device_type = {
.name = "nd_pmem",
.release = nd_region_release,
......@@ -851,11 +799,6 @@ bool is_nd_pmem(struct device *dev)
return dev ? dev->type == &nd_pmem_device_type : false;
}
bool is_nd_blk(struct device *dev)
{
return dev ? dev->type == &nd_blk_device_type : false;
}
bool is_nd_volatile(struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
......@@ -929,22 +872,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
nvdimm_bus_unlock(dev);
}
int nd_blk_region_init(struct nd_region *nd_region)
{
struct device *dev = &nd_region->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
if (!is_nd_blk(dev))
return 0;
if (nd_region->ndr_mappings < 1) {
dev_dbg(dev, "invalid BLK region\n");
return -ENXIO;
}
return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
* nd_region_acquire_lane - allocate and lock a lane
* @nd_region: region id and number of lanes possible
......@@ -1007,23 +934,12 @@ EXPORT_SYMBOL(nd_region_release_lane);
static unsigned long default_align(struct nd_region *nd_region)
{
unsigned long align;
int i, mappings;
u32 remainder;
int mappings;
if (is_nd_blk(&nd_region->dev))
align = MEMREMAP_COMPAT_ALIGN_MAX;
if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
align = PAGE_SIZE;
else
align = MEMREMAP_COMPAT_ALIGN_MAX;
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
align = MEMREMAP_COMPAT_ALIGN_MAX;
break;
}
}
mappings = max_t(u16, 1, nd_region->ndr_mappings);
div_u64_rem(align, mappings, &remainder);
......@@ -1039,7 +955,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
{
struct nd_region *nd_region;
struct device *dev;
void *region_buf;
unsigned int i;
int ro = 0;
......@@ -1057,36 +972,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
if (test_bit(NDD_NOBLK, &nvdimm->flags)
&& dev_type == &nd_blk_device_type) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
caller, dev_name(&nvdimm->dev), i);
return NULL;
}
}
if (dev_type == &nd_blk_device_type) {
struct nd_blk_region_desc *ndbr_desc;
struct nd_blk_region *ndbr;
ndbr_desc = to_blk_region_desc(ndr_desc);
ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
* ndr_desc->num_mappings,
GFP_KERNEL);
if (ndbr) {
nd_region = &ndbr->nd_region;
ndbr->enable = ndbr_desc->enable;
ndbr->do_io = ndbr_desc->do_io;
}
region_buf = ndbr;
} else {
nd_region = kzalloc(struct_size(nd_region, mapping,
ndr_desc->num_mappings),
GFP_KERNEL);
region_buf = nd_region;
}
nd_region =
kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
GFP_KERNEL);
if (!region_buf)
if (!nd_region)
return NULL;
nd_region->id = memregion_alloc(GFP_KERNEL);
if (nd_region->id < 0)
......@@ -1150,7 +1042,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
err_percpu:
memregion_free(nd_region->id);
err_id:
kfree(region_buf);
kfree(nd_region);
return NULL;
}
......@@ -1163,17 +1055,6 @@ struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc)
{
if (ndr_desc->num_mappings > 1)
return NULL;
ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc)
{
......@@ -1198,7 +1079,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
}
/**
* nvdimm_flush - flush any posted write queues between the cpu and pmem media
* @nd_region: blk or interleaved pmem region
* @nd_region: interleaved pmem region
*/
int generic_nvdimm_flush(struct nd_region *nd_region)
{
......@@ -1231,7 +1112,7 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
/**
* nvdimm_has_flush - determine write flushing requirements
* @nd_region: blk or interleaved pmem region
* @nd_region: interleaved pmem region
*
* Returns 1 if writes require flushing
* Returns 0 if writes do not require flushing
......
......@@ -25,8 +25,6 @@ struct badrange {
};
enum {
/* when a dimm supports both PMEM and BLK access a label is required */
NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
......@@ -35,8 +33,6 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
/* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
NDD_NOBLK = 5,
/* dimm supports namespace labels */
NDD_LABELING = 6,
......@@ -140,21 +136,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
}
struct nvdimm_bus;
struct module;
struct nd_blk_region;
struct nd_blk_region_desc {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
struct nd_region_desc ndr_desc;
};
static inline struct nd_blk_region_desc *to_blk_region_desc(
struct nd_region_desc *ndr_desc)
{
return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
}
/*
* Note that separate bits for locked + unlocked are defined so that
......@@ -257,7 +238,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
......@@ -295,10 +275,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
......
......@@ -8,6 +8,7 @@
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
#include <linux/perf_event.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
......@@ -23,6 +24,57 @@ enum nvdimm_claim_class {
NVDIMM_CCLASS_UNKNOWN,
};
#define NVDIMM_EVENT_VAR(_id) event_attr_##_id
#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
#define NVDIMM_EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \
nvdimm_events_sysfs_show)
/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR 0
#define NVDIMM_PMU_EVENT_ATTR 1
#define NVDIMM_PMU_CPUMASK_ATTR 2
#define NVDIMM_PMU_NULL_ATTR 3
/**
* struct nvdimm_pmu - data structure for nvdimm perf driver
* @pmu: pmu data structure for nvdimm performance stats.
* @dev: nvdimm device pointer.
* @cpu: designated cpu for counter access.
* @node: node for cpu hotplug notifier link.
* @cpuhp_state: state for cpu hotplug notification.
* @arch_cpumask: cpumask to get designated cpu for counter access.
*/
struct nvdimm_pmu {
struct pmu pmu;
struct device *dev;
int cpu;
struct hlist_node node;
enum cpuhp_state cpuhp_state;
/* cpumask provided by arch/platform specific code */
struct cpumask arch_cpumask;
};
struct platform_device;
#ifdef CONFIG_PERF_EVENTS
extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page);
int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
#else
static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
{
return -ENXIO;
}
static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
#endif
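A condensed sketch of the driver-side registration contract, assuming hypothetical my_nd_* callbacks (register_nvdimm_pmu() in nd_perf.c above rejects a pmu whose event_init/add/del/read handlers or name are not populated); papr_scm above is the real consumer, this is just the minimum shape:

#include <linux/nd.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Placeholder callbacks; a real driver would query its device counters here */
static int my_nd_event_init(struct perf_event *event) { return 0; }
static int my_nd_add(struct perf_event *event, int flags) { return 0; }
static void my_nd_del(struct perf_event *event, int flags) { }
static void my_nd_read(struct perf_event *event) { }

static int my_driver_register_pmu(struct platform_device *pdev)
{
	struct nvdimm_pmu *nd_pmu;
	int rc;

	nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu)
		return -ENOMEM;

	nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
	nd_pmu->pmu.name = "my_nvdimm";
	nd_pmu->pmu.event_init = my_nd_event_init;
	nd_pmu->pmu.add = my_nd_add;
	nd_pmu->pmu.del = my_nd_del;
	nd_pmu->pmu.read = my_nd_read;
	nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
				   PERF_PMU_CAP_NO_EXCLUDE;

	rc = register_nvdimm_pmu(nd_pmu, pdev);
	if (rc)
		kfree(nd_pmu);
	return rc;
}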
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
......@@ -92,27 +144,6 @@ struct nd_namespace_pmem {
int id;
};
/**
* struct nd_namespace_blk - namespace for dimm-bounded persistent memory
* @alt_name: namespace name supplied in the dimm label
* @uuid: namespace name supplied in the dimm label
* @id: ida allocated id
* @lbasize: blk namespaces have a native sector size when btt not present
* @size: sum of all the resource ranges allocated to this namespace
* @num_resources: number of dpa extents to claim
* @res: discontiguous dpa extents for given dimm
*/
struct nd_namespace_blk {
struct nd_namespace_common common;
char *alt_name;
uuid_t *uuid;
int id;
unsigned long lbasize;
resource_size_t size;
int num_resources;
struct resource **res;
};
static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
return container_of(dev, struct nd_namespace_io, common.dev);
......@@ -125,11 +156,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
{
return container_of(dev, struct nd_namespace_blk, common.dev);
}
/**
* nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
* @ndns: device to read
......
......@@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */
enum nd_driver_flags {
......@@ -198,7 +197,6 @@ enum nd_driver_flags {
ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
......
......@@ -27,7 +27,6 @@ ccflags-y += -I$(srctree)/drivers/acpi/nfit/
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_ACPI_NFIT) += nfit.o
ifeq ($(CONFIG_DAX),m)
......@@ -50,9 +49,6 @@ nd_pmem-y += config_check.o
nd_btt-y := $(NVDIMM_SRC)/btt.o
nd_btt-y += config_check.o
nd_blk-y := $(NVDIMM_SRC)/blk.o
nd_blk-y += config_check.o
nd_e820-y := $(NVDIMM_SRC)/e820.o
nd_e820-y += config_check.o
......
......@@ -11,7 +11,6 @@ void check(void)
BUILD_BUG_ON(!IS_MODULE(CONFIG_BLK_DEV_PMEM));
BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT));
BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_PFN));
BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK));
if (IS_ENABLED(CONFIG_ACPI_NFIT))
BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT));
BUILD_BUG_ON(!IS_MODULE(CONFIG_DEV_DAX));
......
......@@ -338,62 +338,6 @@ static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc,
return 0;
}
static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw)
{
struct ndtest_dimm *dimm = ndbr->blk_provider_data;
struct ndtest_blk_mmio *mmio = dimm->mmio;
struct nd_region *nd_region = &ndbr->nd_region;
unsigned int lane;
if (!mmio)
return -ENOMEM;
lane = nd_region_acquire_lane(nd_region);
if (rw)
memcpy(mmio->base + dpa, iobuf, len);
else {
memcpy(iobuf, mmio->base + dpa, len);
arch_invalidate_pmem(mmio->base + dpa, len);
}
nd_region_release_lane(nd_region, lane);
return 0;
}
static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
struct device *dev)
{
struct nd_blk_region *ndbr = to_nd_blk_region(dev);
struct nvdimm *nvdimm;
struct ndtest_dimm *dimm;
struct ndtest_blk_mmio *mmio;
nvdimm = nd_blk_region_to_dimm(ndbr);
dimm = nvdimm_provider_data(nvdimm);
nd_blk_region_set_provider_data(ndbr, dimm);
dimm->blk_region = to_nd_region(dev);
mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
if (!mmio)
return -ENOMEM;
mmio->base = (void __iomem *) devm_nvdimm_memremap(
dev, dimm->address, 12, nd_blk_memremap_flags(ndbr));
if (!mmio->base) {
dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
return -ENOMEM;
}
mmio->size = dimm->size;
mmio->base_offset = 0;
dimm->mmio = mmio;
return 0;
}
static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
{
int i;
......@@ -523,17 +467,16 @@ static int ndtest_create_region(struct ndtest_priv *p,
struct ndtest_region *region)
{
struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
struct nd_blk_region_desc ndbr_desc;
struct nd_region_desc *ndr_desc, _ndr_desc;
struct nd_interleave_set *nd_set;
struct nd_region_desc *ndr_desc;
struct resource res;
int i, ndimm = region->mapping[0].dimm;
u64 uuid[2];
memset(&res, 0, sizeof(res));
memset(&mappings, 0, sizeof(mappings));
memset(&ndbr_desc, 0, sizeof(ndbr_desc));
ndr_desc = &ndbr_desc.ndr_desc;
memset(&_ndr_desc, 0, sizeof(_ndr_desc));
ndr_desc = &_ndr_desc;
if (!ndtest_alloc_resource(p, region->size, &res.start))
return -ENOMEM;
......@@ -857,10 +800,8 @@ static int ndtest_dimm_register(struct ndtest_priv *priv,
struct device *dev = &priv->pdev.dev;
unsigned long dimm_flags = dimm->flags;
if (dimm->num_formats > 1) {
set_bit(NDD_ALIASING, &dimm_flags);
if (dimm->num_formats > 1)
set_bit(NDD_LABELING, &dimm_flags);
}
if (dimm->flags & PAPR_PMEM_UNARMED_MASK)
set_bit(NDD_UNARMED, &dimm_flags);
......
......@@ -2842,28 +2842,6 @@ static void nfit_test1_setup(struct nfit_test *t)
set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw)
{
struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
struct nd_region *nd_region = &ndbr->nd_region;
unsigned int lane;
lane = nd_region_acquire_lane(nd_region);
if (rw)
memcpy(mmio->addr.base + dpa, iobuf, len);
else {
memcpy(iobuf, mmio->addr.base + dpa, len);
/* give us some some coverage of the arch_invalidate_pmem() API */
arch_invalidate_pmem(mmio->addr.base + dpa, len);
}
nd_region_release_lane(nd_region, lane);
return 0;
}
static unsigned long nfit_ctl_handle;
union acpi_object *result;
......@@ -3219,7 +3197,6 @@ static int nfit_test_probe(struct platform_device *pdev)
nfit_test->setup(nfit_test);
acpi_desc = &nfit_test->acpi_desc;
acpi_nfit_desc_init(acpi_desc, &pdev->dev);
acpi_desc->blk_do_io = nfit_test_blk_do_io;
nd_desc = &acpi_desc->nd_desc;
nd_desc->provider_name = NULL;
nd_desc->module = THIS_MODULE;
......