Commit a1facc1f authored by Dan Williams's avatar Dan Williams Committed by Vishal Verma

ACPI: NFIT: Add runtime firmware activate support

Plumb the platform specific backend for the generic libnvdimm firmware
activate interface. Register dimm level operations to arm/disarm
activation, and register bus level operations to report the dynamic
platform-quiesce time relative to the number of dimms armed for firmware
activation.

A new nfit-specific bus attribute "firmware_activate_noidle" is added to
allow the activation to switch between platform-enforced and OS-
opportunistic device quiesce. In other words, let the hibernate cycle
handle in-flight device-dma rather than the platform attempting to
increase PCI-E timeouts and the like.

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: default avatarDan Williams <dan.j.williams@intel.com>
Signed-off-by: default avatarVishal Verma <vishal.l.verma@intel.com>
parent 48001ea5
...@@ -202,6 +202,25 @@ Description: ...@@ -202,6 +202,25 @@ Description:
functions. See the section named 'NVDIMM Root Device _DSMs' in functions. See the section named 'NVDIMM Root Device _DSMs' in
the ACPI specification. the ACPI specification.
What: /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
Date: Apr, 2020
KernelVersion: v5.8
Contact: linux-nvdimm@lists.01.org
Description:
(RW) The Intel platform implementation of firmware activate
support exposes an option to let the platform force idle devices in
the system over the activation event, or trust that the OS will
do it. The safe default is to let the platform force idle
devices since the kernel is already in a suspend state, and on
the chance that a driver does not properly quiesce bus-mastering
after a suspend callback the platform will handle it. However,
the activation might abort if, for example, platform firmware
determines that the activation time exceeds the max PCI-E
completion timeout. Since the platform does not know whether the
OS is running the activation from a suspend context it aborts,
but if the system owner trusts driver suspend callback to be
sufficient then 'firmware_activate_noidle' can be
enabled to bypass the activation abort.
What: /sys/bus/nd/devices/regionX/nfit/range_index What: /sys/bus/nd/devices/regionX/nfit/range_index
Date: Jun, 2015 Date: Jun, 2015
......
...@@ -1392,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) ...@@ -1392,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
struct device *dev = container_of(kobj, struct device, kobj); struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) if (a == &dev_attr_scrub.attr)
return 0; return ars_supported(nvdimm_bus) ? a->mode : 0;
if (a == &dev_attr_firmware_activate_noidle.attr)
return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
return a->mode; return a->mode;
} }
...@@ -1402,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = { ...@@ -1402,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_scrub.attr, &dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr, &dev_attr_hw_error_scrub.attr,
&dev_attr_bus_dsm_mask.attr, &dev_attr_bus_dsm_mask.attr,
&dev_attr_firmware_activate_noidle.attr,
NULL, NULL,
}; };
...@@ -2019,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) ...@@ -2019,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
} }
} }
/*
 * acpi_nfit_get_fw_ops - select per-DIMM firmware-activate operations
 * @nfit_mem: DIMM instance being registered
 *
 * Returns the Intel firmware-activate ops for this DIMM, or NULL when
 * runtime firmware activation is unsupported. Support requires all of:
 * bus-level fw_ops already registered on the bus descriptor, an
 * NVDIMM_FAMILY_INTEL device, and the complete Intel firmware-activate
 * command set advertised in the DIMM's DSM mask.
 */
static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
struct nfit_mem *nfit_mem)
{
unsigned long mask;
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
/* No bus-level activate support => no dimm-level support */
if (!nd_desc->fw_ops)
return NULL;
/* intel_fw_ops issues Intel-family DSMs only */
if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
return NULL;
/* Require every command in the activate set, not just a subset */
mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
return NULL;
return intel_fw_ops;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{ {
struct nfit_mem *nfit_mem; struct nfit_mem *nfit_mem;
...@@ -2095,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) ...@@ -2095,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
acpi_nfit_dimm_attribute_groups, acpi_nfit_dimm_attribute_groups,
flags, cmd_mask, flush ? flush->hint_count : 0, flags, cmd_mask, flush ? flush->hint_count : 0,
nfit_mem->flush_wpq, &nfit_mem->id[0], nfit_mem->flush_wpq, &nfit_mem->id[0],
acpi_nfit_get_security_ops(nfit_mem->family)); acpi_nfit_get_security_ops(nfit_mem->family),
acpi_nfit_get_fw_ops(nfit_mem));
if (!nvdimm) if (!nvdimm)
return -ENOMEM; return -ENOMEM;
...@@ -2170,8 +2196,10 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) ...@@ -2170,8 +2196,10 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
if (acpi_desc->bus_cmd_force_en) { if (acpi_desc->bus_cmd_force_en) {
nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
mask = &nd_desc->bus_family_mask; mask = &nd_desc->bus_family_mask;
if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
nd_desc->fw_ops = intel_bus_fw_ops;
}
} }
adev = to_acpi_dev(acpi_desc); adev = to_acpi_dev(acpi_desc);
...@@ -2202,6 +2230,11 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) ...@@ -2202,6 +2230,11 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, mask); set_bit(i, mask);
if (*mask == dsm_mask) {
set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
nd_desc->fw_ops = intel_bus_fw_ops;
}
} }
static ssize_t range_index_show(struct device *dev, static ssize_t range_index_show(struct device *dev,
......
This diff is collapsed.
...@@ -169,4 +169,7 @@ struct nd_intel_bus_fw_activate { ...@@ -169,4 +169,7 @@ struct nd_intel_bus_fw_activate {
u8 iodev_state; u8 iodev_state;
u32 status; u32 status;
} __packed; } __packed;
extern const struct nvdimm_fw_ops *intel_fw_ops;
extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops;
#endif #endif
...@@ -220,6 +220,9 @@ struct nfit_mem { ...@@ -220,6 +220,9 @@ struct nfit_mem {
struct list_head list; struct list_head list;
struct acpi_device *adev; struct acpi_device *adev;
struct acpi_nfit_desc *acpi_desc; struct acpi_nfit_desc *acpi_desc;
enum nvdimm_fwa_state fwa_state;
enum nvdimm_fwa_result fwa_result;
int fwa_count;
char id[NFIT_DIMM_ID_LEN+1]; char id[NFIT_DIMM_ID_LEN+1];
struct resource *flush_wpq; struct resource *flush_wpq;
unsigned long dsm_mask; unsigned long dsm_mask;
...@@ -265,6 +268,11 @@ struct acpi_nfit_desc { ...@@ -265,6 +268,11 @@ struct acpi_nfit_desc {
unsigned int scrub_tmo; unsigned int scrub_tmo;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw); void *iobuf, u64 len, int rw);
enum nvdimm_fwa_state fwa_state;
enum nvdimm_fwa_capability fwa_cap;
int fwa_count;
bool fwa_noidle;
bool fwa_nosuspend;
}; };
enum scrub_mode { enum scrub_mode {
...@@ -367,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event); ...@@ -367,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event);
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
extern struct device_attribute dev_attr_firmware_activate_noidle;
#endif /* __NFIT_H__ */ #endif /* __NFIT_H__ */
...@@ -582,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, ...@@ -582,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups, void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush, unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id, struct resource *flush_wpq, const char *dimm_id,
const struct nvdimm_security_ops *sec_ops) const struct nvdimm_security_ops *sec_ops,
const struct nvdimm_fw_ops *fw_ops)
{ {
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev; struct device *dev;
...@@ -612,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, ...@@ -612,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
dev->devt = MKDEV(nvdimm_major, nvdimm->id); dev->devt = MKDEV(nvdimm_major, nvdimm->id);
dev->groups = groups; dev->groups = groups;
nvdimm->sec.ops = sec_ops; nvdimm->sec.ops = sec_ops;
nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0; nvdimm->sec.overwrite_tmo = 0;
INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query); INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
/* /*
......
...@@ -269,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, ...@@ -269,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups, void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush, unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id, struct resource *flush_wpq, const char *dimm_id,
const struct nvdimm_security_ops *sec_ops); const struct nvdimm_security_ops *sec_ops,
const struct nvdimm_fw_ops *fw_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups, void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush, unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq) struct resource *flush_wpq)
{ {
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
cmd_mask, num_flush, flush_wpq, NULL, NULL); cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
} }
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment