Commit a3c0e7b1 authored by Linus Torvalds

Merge tag 'libnvdimm-fixes-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

More libnvdimm updates from Dan Williams:

 - Complete the reworks to interoperate with powerpc dynamic huge page
   sizes

 - Fix a crash due to missed accounting for the powerpc 'struct
   page'-memmap mapping granularity

 - Fix badblock initialization for volatile (DRAM emulated) pmem ranges

 - Stop triggering request_key() notifications to userspace when
   NVDIMM-security is disabled / not present

 - Miscellaneous small fixups

* tag 'libnvdimm-fixes-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm/region: Enable MAP_SYNC for volatile regions
  libnvdimm: prevent nvdimm from requesting key when security is disabled
  libnvdimm/region: Initialize bad block for volatile namespaces
  libnvdimm/nfit_test: Fix acpi_handle redefinition
  libnvdimm/altmap: Track namespace boundaries in altmap
  libnvdimm: Fix endian conversion issues 
  libnvdimm/dax: Pick the right alignment default when creating dax devices
  powerpc/book3s64: Export has_transparent_hugepage() related functions.
parents 939ca9f1 4c806b89
@@ -254,7 +254,13 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                             unsigned long addr, pmd_t *pmdp);
-extern int radix__has_transparent_hugepage(void);
+static inline int radix__has_transparent_hugepage(void)
+{
+        /* For radix 2M at PMD level means thp */
+        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
+                return 1;
+        return 0;
+}
 #endif

 extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
...
@@ -406,6 +406,8 @@ int hash__has_transparent_hugepage(void)
         return 1;
 }
+EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #ifdef CONFIG_STRICT_KERNEL_RWX
...
@@ -1027,13 +1027,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
         return old_pmd;
 }

-int radix__has_transparent_hugepage(void)
-{
-        /* For radix 2M at PMD level means thp */
-        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
-                return 1;
-        return 0;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
...
@@ -172,6 +172,21 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
         vmemmap_list = vmem_back;
 }

+static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+                                  unsigned long page_size)
+{
+        unsigned long nr_pfn = page_size / sizeof(struct page);
+        unsigned long start_pfn = page_to_pfn((struct page *)start);
+
+        if ((start_pfn + nr_pfn) > altmap->end_pfn)
+                return true;
+
+        if (start_pfn < altmap->base_pfn)
+                return true;
+
+        return false;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                 struct vmem_altmap *altmap)
 {
@@ -194,7 +209,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
          * fail due to alignment issues when using 16MB hugepages, so
          * fall back to system memory if the altmap allocation fail.
          */
-        if (altmap) {
+        if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
                 p = altmap_alloc_block_buf(page_size, altmap);
                 if (!p)
                         pr_debug("altmap block allocation failed, falling back to system memory");
...
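The altmap_cross_boundary() check above is plain pfn arithmetic: a vmemmap block of page_size bytes backs page_size / sizeof(struct page) pfns, and the block may be taken from the altmap only when that whole span lies inside [base_pfn, end_pfn). A minimal user-space sketch of the same check, with stand-in types and illustrative numbers (struct page is assumed to be 64 bytes here):

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for struct vmem_altmap; only the fields the check needs */
    struct altmap {
            unsigned long base_pfn; /* first pfn backed by the altmap */
            unsigned long end_pfn;  /* one past the last backed pfn */
    };

    static bool altmap_cross_boundary(const struct altmap *a,
                    unsigned long start_pfn, unsigned long page_size)
    {
            unsigned long nr_pfn = page_size / 64; /* 64 ~ sizeof(struct page) */

            return start_pfn < a->base_pfn || start_pfn + nr_pfn > a->end_pfn;
    }

    int main(void)
    {
            /* a 16MB mapping block covers 16MB / 64B = 262144 pfns */
            struct altmap a = { .base_pfn = 0x100000, .end_pfn = 0x120000 };

            printf("%d\n", altmap_cross_boundary(&a, 0x100000, 16UL << 20)); /* 1 */
            printf("%d\n", altmap_cross_boundary(&a, 0x100000, 2UL << 20));  /* 0 */
            return 0;
    }

Without the end_pfn clamp, a 16MB powerpc mapping block starting near the end of a namespace could be handed struct pages past the device's reserved area, which appears to be the missed granularity accounting the merge message refers to.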
@@ -392,9 +392,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
         arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
         if (++(arena->freelist[lane].seq) == 4)
                 arena->freelist[lane].seq = 1;
-        if (ent_e_flag(ent->old_map))
+        if (ent_e_flag(le32_to_cpu(ent->old_map)))
                 arena->freelist[lane].has_err = 1;
-        arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
+        arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

         return ret;
 }
@@ -560,8 +560,8 @@ static int btt_freelist_init(struct arena_info *arena)
          * FIXME: if error clearing fails during init, we want to make
          * the BTT read-only
          */
-        if (ent_e_flag(log_new.old_map) &&
-                        !ent_normal(log_new.old_map)) {
+        if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
+                        !ent_normal(le32_to_cpu(log_new.old_map))) {
                 arena->freelist[i].has_err = 1;
                 ret = arena_clear_freelist_error(arena, i);
                 if (ret)
...
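Both BTT hunks fix the same class of bug: old_map is stored little-endian on media, so the flag bits must be extracted after the le32_to_cpu() conversion; masking the raw value first gives the wrong answer on a big-endian host. A self-contained sketch of the failure mode, with mask values patterned after the BTT map-entry layout (stand-ins, not the kernel's headers):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins patterned after the BTT map-entry layout */
    #define MAP_ERR_MASK    (1u << 30)
    #define MAP_LBA_MASK    (~((1u << 31) | (1u << 30)))
    #define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
    #define ent_lba(ent)    ((ent) & MAP_LBA_MASK)

    /* byte swap, i.e. what le32_to_cpu() does on a big-endian host */
    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                    ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            uint32_t cpu = MAP_ERR_MASK | 0x1234; /* entry as the CPU sees it */
            uint32_t raw = swab32(cpu);           /* on-media bytes, read on BE */

            /* buggy order: mask the raw little-endian value */
            printf("flag before swap: %d\n", ent_e_flag(raw));         /* 0: wrong */
            /* fixed order: convert first, then mask */
            printf("flag after swap:  %d\n", ent_e_flag(swab32(raw))); /* 1 */
            printf("lba: 0x%x\n", ent_lba(swab32(raw)));               /* 0x1234 */
            return 0;
    }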
@@ -180,7 +180,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
         sector_t sector;

         /* make sure device is a region */
-        if (!is_nd_pmem(dev))
+        if (!is_memory(dev))
                 return 0;

         nd_region = to_nd_region(dev);
...
@@ -1987,7 +1987,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
                 nd_mapping = &nd_region->mapping[i];
                 label_ent = list_first_entry_or_null(&nd_mapping->labels,
                                 typeof(*label_ent), list);
-                label0 = label_ent ? label_ent->label : 0;
+                label0 = label_ent ? label_ent->label : NULL;

                 if (!label0) {
                         WARN_ON(1);
@@ -2322,8 +2322,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
                         continue;

                 /* skip labels that describe extents outside of the region */
-                if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
+                if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
+                                __le64_to_cpu(nd_label->dpa) > map_end)
                         continue;

                 i = add_namespace_resource(nd_region, nd_label, devs, count);
                 if (i < 0)
...
@@ -289,11 +289,7 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
 struct nd_pfn *to_nd_pfn(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_PFN)

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE
-#else
-#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE
-#endif
+#define MAX_NVDIMM_ALIGN        4

 int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_pfn(struct device *dev);
...
@@ -103,39 +103,42 @@ static ssize_t align_show(struct device *dev,
         return sprintf(buf, "%ld\n", nd_pfn->align);
 }

-static const unsigned long *nd_pfn_supported_alignments(void)
+static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
 {
-        /*
-         * This needs to be a non-static variable because the *_SIZE
-         * macros aren't always constants.
-         */
-        const unsigned long supported_alignments[] = {
-                PAGE_SIZE,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                HPAGE_PMD_SIZE,
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-                HPAGE_PUD_SIZE,
-#endif
-#endif
-                0,
-        };
-        static unsigned long data[ARRAY_SIZE(supported_alignments)];

-        memcpy(data, supported_alignments, sizeof(data));
+        alignments[0] = PAGE_SIZE;
+
+        if (has_transparent_hugepage()) {
+                alignments[1] = HPAGE_PMD_SIZE;
+                if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+                        alignments[2] = HPAGE_PUD_SIZE;
+        }
+
+        return alignments;
+}

-        return data;
+/*
+ * Use pmd mapping if supported as default alignment
+ */
+static unsigned long nd_pfn_default_alignment(void)
+{
+        if (has_transparent_hugepage())
+                return HPAGE_PMD_SIZE;
+        return PAGE_SIZE;
 }

 static ssize_t align_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
         struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
+        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
         ssize_t rc;

         nd_device_lock(dev);
         nvdimm_bus_lock(dev);
         rc = nd_size_select_store(dev, buf, &nd_pfn->align,
-                        nd_pfn_supported_alignments());
+                        nd_pfn_supported_alignments(aligns));
         dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                         buf[len - 1] == '\n' ? "" : "\n");
         nvdimm_bus_unlock(dev);
@@ -259,7 +262,10 @@ static DEVICE_ATTR_RO(size);
 static ssize_t supported_alignments_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
+        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+        return nd_size_select_show(0,
+                        nd_pfn_supported_alignments(aligns), buf);
 }
 static DEVICE_ATTR_RO(supported_alignments);
@@ -302,7 +308,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                 return NULL;

         nd_pfn->mode = PFN_MODE_NONE;
-        nd_pfn->align = PFN_DEFAULT_ALIGNMENT;
+        nd_pfn->align = nd_pfn_default_alignment();
         dev = &nd_pfn->dev;
         device_initialize(&nd_pfn->dev);
         if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
@@ -412,6 +418,21 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
         return 0;
 }

+static bool nd_supported_alignment(unsigned long align)
+{
+        int i;
+        unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+        if (align == 0)
+                return false;
+
+        nd_pfn_supported_alignments(supported);
+        for (i = 0; supported[i]; i++)
+                if (align == supported[i])
+                        return true;
+
+        return false;
+}
+
 /**
  * nd_pfn_validate - read and validate info-block
  * @nd_pfn: fsdax namespace runtime state / properties
@@ -496,6 +517,18 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                 return -EOPNOTSUPP;
         }

+        /*
+         * Check whether the we support the alignment. For Dax if the
+         * superblock alignment is not matching, we won't initialize
+         * the device.
+         */
+        if (!nd_supported_alignment(align) &&
+                        !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
+                dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
+                                "%ld:%ld\n", nd_pfn->align, align);
+                return -EOPNOTSUPP;
+        }
+
         if (!nd_pfn->uuid) {
                 /*
                  * When probing a namepace via nd_pfn_probe() the uuid
@@ -639,9 +672,11 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
         struct nd_namespace_common *ndns = nd_pfn->ndns;
         struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
         resource_size_t base = nsio->res.start + start_pad;
+        resource_size_t end = nsio->res.end - end_trunc;
         struct vmem_altmap __altmap = {
                 .base_pfn = init_altmap_base(base),
                 .reserve = init_altmap_reserve(base),
+                .end_pfn = PHYS_PFN(end),
         };

         memcpy(res, &nsio->res, sizeof(*res));
...
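The net effect of the pfn/dax rework above is that supported and default alignments are now computed at runtime from has_transparent_hugepage() instead of compile-time #ifdefs, which matters on powerpc where the available huge-page sizes depend on the active MMU (hash vs. radix). A rough user-space model of the zero-terminated list plus lookup; the sizes and the thp flag are illustrative stand-ins, not kernel values:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE        (64UL << 10) /* e.g. 64K pages on powerpc */
    #define HPAGE_PMD_SIZE   (2UL << 20)  /* illustrative */
    #define MAX_NVDIMM_ALIGN 4

    static bool has_transparent_hugepage(void) { return true; } /* stand-in */

    static unsigned long *supported_alignments(unsigned long *alignments)
    {
            alignments[0] = PAGE_SIZE;
            if (has_transparent_hugepage())
                    alignments[1] = HPAGE_PMD_SIZE;
            return alignments;
    }

    /* mirrors nd_supported_alignment(): scan the zero-terminated list */
    static bool supported(unsigned long align)
    {
            unsigned long aligns[MAX_NVDIMM_ALIGN] = { 0, };
            int i;

            if (align == 0)
                    return false;
            supported_alignments(aligns);
            for (i = 0; aligns[i]; i++)
                    if (align == aligns[i])
                            return true;
            return false;
    }

    int main(void)
    {
            printf("16M supported? %d\n", supported(16UL << 20)); /* 0 */
            printf("2M supported?  %d\n", supported(2UL << 20));  /* 1 */
            return 0;
    }

This is also why nd_pfn_validate() now rejects a DAX info-block whose recorded alignment is not in the runtime list: a namespace created under one huge-page geometry cannot silently come up under another.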
@@ -34,7 +34,7 @@ static int nd_region_probe(struct device *dev)
         if (rc)
                 return rc;

-        if (is_nd_pmem(&nd_region->dev)) {
+        if (is_memory(&nd_region->dev)) {
                 struct resource ndr_res;

                 if (devm_init_badblocks(dev, &nd_region->bb))
@@ -123,7 +123,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
                 struct nd_region *nd_region = to_nd_region(dev);
                 struct resource res;

-                if (is_nd_pmem(&nd_region->dev)) {
+                if (is_memory(&nd_region->dev)) {
                         res.start = nd_region->ndr_start;
                         res.end = nd_region->ndr_start +
                                 nd_region->ndr_size - 1;
...
@@ -632,11 +632,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
         if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                 return 0;

-        if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+        if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                 return 0;

         if (a == &dev_attr_resource.attr) {
-                if (is_nd_pmem(dev))
+                if (is_memory(dev))
                         return 0400;
                 else
                         return 0;
@@ -1168,6 +1168,9 @@ EXPORT_SYMBOL_GPL(nvdimm_has_cache);

 bool is_nvdimm_sync(struct nd_region *nd_region)
 {
+        if (is_nd_volatile(&nd_region->dev))
+                return true;
+
         return is_nd_pmem(&nd_region->dev) &&
                 !test_bit(ND_REGION_ASYNC, &nd_region->flags);
 }
...
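With is_nvdimm_sync() now returning true for volatile regions, MAP_SYNC mappings on DRAM-emulated pmem stop failing spuriously. For reference, a sketch of the user-space pattern this gates; the file path is illustrative, and the fallback defines cover older libc headers:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MAP_SHARED_VALIDATE
    #define MAP_SHARED_VALIDATE 0x03
    #endif
    #ifndef MAP_SYNC
    #define MAP_SYNC 0x80000
    #endif

    int main(void)
    {
            int fd = open("/mnt/pmem/data", O_RDWR); /* illustrative path */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /*
             * MAP_SYNC must be paired with MAP_SHARED_VALIDATE so that a
             * kernel or filesystem that cannot honor synchronous faults
             * fails with EOPNOTSUPP instead of silently dropping the flag.
             */
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
            if (p == MAP_FAILED)
                    perror("mmap"); /* EOPNOTSUPP if region is not sync-capable */
            else
                    munmap(p, 4096);
            close(fd);
            return 0;
    }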
@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                         || !nvdimm->sec.flags)
                 return -EIO;

+        /* No need to go further if security is disabled */
+        if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
+                return 0;
+
         if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                 dev_dbg(dev, "Security operation in progress.\n");
                 return -EBUSY;
...
@@ -108,7 +108,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
         if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                 return true;

+        /*
+         * For dax vmas, try to always use hugepage mappings. If the kernel does
+         * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
+         * mappings, and device-dax namespaces, that try to guarantee a given
+         * mapping size, will fail to enable
+         */
         if (vma_is_dax(vma))
                 return true;
...
@@ -17,6 +17,7 @@ struct device;
  */
 struct vmem_altmap {
         const unsigned long base_pfn;
+        const unsigned long end_pfn;
         const unsigned long reserve;
         unsigned long free;
         unsigned long align;
...
@@ -4,6 +4,7 @@
  */
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
+#include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/uuid.h>
 #include <linux/ioport.h>
@@ -202,9 +203,6 @@ struct nd_intel_lss {
         __u32 status;
 } __packed;

-union acpi_object;
-typedef void *acpi_handle;
-
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
                 const guid_t *guid, u64 rev, u64 func,
...