Commit 3dd60fb9 authored by Christoph Hellwig, committed by Dan Williams

nvdimm/pmem: stop using q_usage_count as external pgmap refcount

Originally all DAX access went through block_device operations and thus
needed a queue reference.  But since commit cccbce67
("filesystem-dax: convert to dax_direct_access()") all this happens at
the DAX device level, which uses its own refcounting.  The external
refcount thus wasn't needed, but has otherwise been harmless for a long
time.
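
(For illustration only: a rough sketch of the kind of DAX-level access path
that cccbce67 introduced, where the caller goes through the dax_device rather
than the block queue, so no q_usage_count reference is involved. The helper
below is hypothetical; dax_read_lock()/dax_read_unlock() and
dax_direct_access() are the real interfaces, but their exact signatures may
vary between kernel releases.)

/* hypothetical example; needs <linux/dax.h>, <linux/string.h>, <linux/minmax.h> */
static int example_dax_read(struct dax_device *dax_dev, pgoff_t pgoff,
		void *buf, size_t len)
{
	void *kaddr;
	long avail;
	int id;

	/* dax_read_lock() protects the dax_device against teardown (kill_dax) */
	id = dax_read_lock();

	/* ask for one page of the device and get a kernel address for it */
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (avail < 0) {
		dax_read_unlock(id);
		return avail;
	}

	memcpy(buf, kaddr, min_t(size_t, len, PAGE_SIZE));
	dax_read_unlock(id);
	return 0;
}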

But now that "block: drain file system I/O on del_gendisk" waits for
q_usage_count to reach 0 in del_gendisk this whole scheme can't work
anymore (and pmem is the only driver abusing q_usage_count like that).
So switch to the internal reference, and remove the unbalanced
blk_freeze_queue_start call that is now taken care of by del_gendisk.
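
(Concretely, the post-patch pattern, shown as a minimal sketch distilled from
the diff below rather than the complete driver; error handling and the nd_pfn
layout handling are omitted.)

	/*
	 * Setup in pmem_attach_disk(): neither pgmap->ref nor kill/cleanup
	 * ops are supplied, so devm_memremap_pages() falls back to the
	 * pgmap's internal reference count.
	 */
	pmem->pgmap.owner = pmem;
	pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
	addr = devm_memremap_pages(dev, &pmem->pgmap);

	/*
	 * Teardown in pmem_release_disk(): del_gendisk() now drains
	 * q_usage_counter itself, then the disk and queue are released.
	 */
	del_gendisk(pmem->disk);
	blk_cleanup_disk(pmem->disk);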

Fixes: 8e141f9e ("block: drain file system I/O on del_gendisk")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019073641.2323410-2-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 3906fe9b
@@ -333,26 +333,6 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	struct pmem_device *pmem = pgmap->owner;
-
-	blk_cleanup_disk(pmem->disk);
-}
-
-static void pmem_release_queue(void *pgmap)
-{
-	pmem_pagemap_cleanup(pgmap);
-}
-
-static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	struct request_queue *q =
-		container_of(pgmap->ref, struct request_queue, q_usage_counter);
-
-	blk_freeze_queue_start(q);
-}
-
 static void pmem_release_disk(void *__pmem)
 {
 	struct pmem_device *pmem = __pmem;
@@ -360,12 +340,9 @@ static void pmem_release_disk(void *__pmem)
 	kill_dax(pmem->dax_dev);
 	put_dax(pmem->dax_dev);
 	del_gendisk(pmem->disk);
-}
 
-static const struct dev_pagemap_ops fsdax_pagemap_ops = {
-	.kill = pmem_pagemap_kill,
-	.cleanup = pmem_pagemap_cleanup,
-};
+	blk_cleanup_disk(pmem->disk);
+}
 
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns)
@@ -427,10 +404,8 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->disk = disk;
 	pmem->pgmap.owner = pmem;
 	pmem->pfn_flags = PFN_DEV;
-	pmem->pgmap.ref = &q->q_usage_counter;
 	if (is_nd_pfn(dev)) {
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -444,16 +419,12 @@ static int pmem_attach_disk(struct device *dev,
 		pmem->pgmap.range.end = res->end;
 		pmem->pgmap.nr_range = 1;
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
 		bb_range = pmem->pgmap.range;
 	} else {
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
-		if (devm_add_action_or_reset(dev, pmem_release_queue,
-					&pmem->pgmap))
-			return -ENOMEM;
 		bb_range.start = res->start;
 		bb_range.end = res->end;
 	}