Commit 4596f554 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'libnvdimm-fixes-4.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm fixes from Dave Jiang:

 - ensure that a variable passed in by reference to acpi_nfit_ctl is
   always set to a value. An incremental patch is provided due to notice
   from testing in -next. The rest of the commits did not exhibit
   issues.

 - fix a return path in nsio_rw_bytes() that was not returning "bytes
   remain" as expected for the function.

 - address an issue where applications polling on scrub-completion for
   the NVDIMM may be falsely woken up, read the wrong state value, and
   hang.

 - change the test unit persistent capability attribute to fix up a
   broken assumption in the unit test infrastructure with respect to
   the 'write_cache' attribute

 - ratelimit dev_info() in the dax device check_vma() function since
   this is easily triggered from userspace

* tag 'libnvdimm-fixes-4.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  nfit: fix unchecked dereference in acpi_nfit_ctl
  acpi, nfit: Fix scrub idle detection
  tools/testing/nvdimm: advertise a write cache for nfit_test
  acpi/nfit: fix cmd_rc for acpi_nfit_ctl to always return a value
  dev-dax: check_vma: ratelimit dev_info-s
  libnvdimm, pmem: Fix memcpy_mcsafe() return code handling in nsio_rw_bytes()
parents 63f04777 ee6581ce
...@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, ...@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
const guid_t *guid; const guid_t *guid;
int rc, i; int rc, i;
if (cmd_rc)
*cmd_rc = -EINVAL;
func = cmd; func = cmd;
if (cmd == ND_CMD_CALL) { if (cmd == ND_CMD_CALL) {
call_pkg = buf; call_pkg = buf;
...@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, ...@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* If we return an error (like elsewhere) then caller wouldn't * If we return an error (like elsewhere) then caller wouldn't
* be able to rely upon data returned to make calculation. * be able to rely upon data returned to make calculation.
*/ */
if (cmd_rc)
*cmd_rc = 0;
return 0; return 0;
} }
...@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev, ...@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
mutex_lock(&acpi_desc->init_mutex); mutex_lock(&acpi_desc->init_mutex);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
work_busy(&acpi_desc->dwork.work) acpi_desc->scrub_busy
&& !acpi_desc->cancel ? "+\n" : "\n"); && !acpi_desc->cancel ? "+\n" : "\n");
mutex_unlock(&acpi_desc->init_mutex); mutex_unlock(&acpi_desc->init_mutex);
} }
...@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, ...@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
return 0; return 0;
} }
/*
 * Mark an Address Range Scrub (ARS) as in flight and (re)queue the scrub
 * worker after @tmo seconds.  Setting ->scrub_busy before queueing lets
 * scrub_show() report "+" (busy) without racing the workqueue.
 * Caller must hold acpi_desc->init_mutex (asserted below).
 */
static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
	lockdep_assert_held(&acpi_desc->init_mutex);
	acpi_desc->scrub_busy = 1;
	/* note this should only be set from within the workqueue */
	if (tmo)
		acpi_desc->scrub_tmo = tmo;
	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
}
/*
 * Schedule an ARS run immediately (zero delay).  Convenience wrapper for
 * callers outside the workqueue; init_mutex must be held (checked by
 * __sched_ars()).
 */
static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}
/*
 * Record completion of an ARS pass: clear the busy flag, bump the
 * completion count, and notify any sysfs pollers waiting on the 'scrub'
 * attribute.  Caller must hold acpi_desc->init_mutex (asserted below) so
 * scrub_show() observes scrub_busy and scrub_count consistently.
 */
static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
	lockdep_assert_held(&acpi_desc->init_mutex);
	acpi_desc->scrub_busy = 0;
	acpi_desc->scrub_count++;
	/* only notify if userspace registered interest via scrub_show() */
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
}
static void acpi_nfit_scrub(struct work_struct *work) static void acpi_nfit_scrub(struct work_struct *work)
{ {
struct acpi_nfit_desc *acpi_desc; struct acpi_nfit_desc *acpi_desc;
...@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work) ...@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
mutex_lock(&acpi_desc->init_mutex); mutex_lock(&acpi_desc->init_mutex);
query_rc = acpi_nfit_query_poison(acpi_desc); query_rc = acpi_nfit_query_poison(acpi_desc);
tmo = __acpi_nfit_scrub(acpi_desc, query_rc); tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
if (tmo) { if (tmo)
queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); __sched_ars(acpi_desc, tmo);
acpi_desc->scrub_tmo = tmo; else
} else { notify_ars_done(acpi_desc);
acpi_desc->scrub_count++;
if (acpi_desc->scrub_count_state)
sysfs_notify_dirent(acpi_desc->scrub_count_state);
}
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
mutex_unlock(&acpi_desc->init_mutex); mutex_unlock(&acpi_desc->init_mutex);
} }
...@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) ...@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
break; break;
} }
queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); sched_ars(acpi_desc);
return 0; return 0;
} }
...@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) ...@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
} }
} }
if (scheduled) { if (scheduled) {
queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); sched_ars(acpi_desc);
dev_dbg(dev, "ars_scan triggered\n"); dev_dbg(dev, "ars_scan triggered\n");
} }
mutex_unlock(&acpi_desc->init_mutex); mutex_unlock(&acpi_desc->init_mutex);
......
...@@ -203,6 +203,7 @@ struct acpi_nfit_desc { ...@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
unsigned int max_ars; unsigned int max_ars;
unsigned int scrub_count; unsigned int scrub_count;
unsigned int scrub_mode; unsigned int scrub_mode;
unsigned int scrub_busy:1;
unsigned int cancel:1; unsigned int cancel:1;
unsigned long dimm_cmd_force_en; unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en; unsigned long bus_cmd_force_en;
......
...@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, ...@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
/* prevent private mappings from being established */ /* prevent private mappings from being established */
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info(dev, "%s: %s: fail, attempted private mapping\n", dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func); current->comm, func);
return -EINVAL; return -EINVAL;
} }
mask = dax_region->align - 1; mask = dax_region->align - 1;
if (vma->vm_start & mask || vma->vm_end & mask) { if (vma->vm_start & mask || vma->vm_end & mask) {
dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
current->comm, func, vma->vm_start, vma->vm_end, current->comm, func, vma->vm_start, vma->vm_end,
mask); mask);
return -EINVAL; return -EINVAL;
...@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, ...@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
&& (vma->vm_flags & VM_DONTCOPY) == 0) { && (vma->vm_flags & VM_DONTCOPY) == 0) {
dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", dev_info_ratelimited(dev,
"%s: %s: fail, dax range requires MADV_DONTFORK\n",
current->comm, func); current->comm, func);
return -EINVAL; return -EINVAL;
} }
if (!vma_is_dax(vma)) { if (!vma_is_dax(vma)) {
dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
current->comm, func); current->comm, func);
return -EINVAL; return -EINVAL;
} }
......
...@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, ...@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return -EIO; return -EIO;
if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
return -EIO; return -EIO;
return 0;
} }
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
......
...@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t) ...@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
pcap->header.length = sizeof(*pcap); pcap->header.length = sizeof(*pcap);
pcap->highest_capability = 1; pcap->highest_capability = 1;
pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH | pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
ACPI_NFIT_CAPABILITY_MEM_FLUSH;
offset += pcap->header.length; offset += pcap->header.length;
if (t->setup_hotplug) { if (t->setup_hotplug) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment