Commit 6342649c authored by Linus Torvalds

Merge tag 'block-6.11-20240726' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Fix cleanup of requests without payloads (Leon)
     - Use new protection information format (Francis)
     - Improve debug message for lost PCI link (Bart)
     - Another APST quirk (Wang)
     - Use the appropriate seq_file API for printing single characters (Markus)

 - ublk async device deletion fix (Ming)

 - drbd kerneldoc fixups (Simon)

 - Fix deadlock between sd removal and release (Yang)

* tag 'block-6.11-20240726' of git://git.kernel.dk/linux:
  nvme-pci: add missing condition check for existence of mapped data
  ublk: fix UBLK_CMD_DEL_DEV_ASYNC handling
  block: fix deadlock between sd_remove & sd_release
  drbd: Add peer_device to Kernel doc
  nvme-core: choose PIF from QPIF if QPIFS supports and PIF is QTYPE
  nvme-pci: Fix the instructions for disabling power management
  nvme: remove redundant bdev local variable
  nvme-fabrics: Use seq_putc() in __nvmf_concat_opt_tokens()
  nvme/pci: Add APST quirk for Lenovo N60z laptop
parents 8c930747 f6bb5254
@@ -663,12 +663,12 @@ void del_gendisk(struct gendisk *disk)
 	 */
 	if (!test_bit(GD_DEAD, &disk->state))
 		blk_report_disk_dead(disk, false);
-	__blk_mark_disk_dead(disk);
 
 	/*
 	 * Drop all partitions now that the disk is marked dead.
 	 */
 	mutex_lock(&disk->open_mutex);
+	__blk_mark_disk_dead(disk);
 	xa_for_each_start(&disk->part_tbl, idx, part, 1)
 		drop_partition(part);
 	mutex_unlock(&disk->open_mutex);
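
The hunk above is the fix for the deadlock between sd removal and release mentioned in the pull request: __blk_mark_disk_dead() now runs with disk->open_mutex held, so marking the disk dead and dropping its partitions form one critical section with respect to the open/release paths that take the same mutex. The snippet below is only a minimal user-space illustration of that general pattern (hypothetical names, a pthread mutex standing in for open_mutex), not the block layer's actual code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the disk state and open_mutex. */
static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool disk_dead;
static int nr_partitions = 4;

/* Teardown path: mark dead and drop partitions in one critical section. */
static void del_disk(void)
{
	pthread_mutex_lock(&open_mutex);
	disk_dead = true;	/* analogous to __blk_mark_disk_dead() */
	nr_partitions = 0;	/* analogous to the drop_partition() loop */
	pthread_mutex_unlock(&open_mutex);
}

/* Release path: takes the same lock, so it sees a consistent state. */
static void release_disk(void)
{
	pthread_mutex_lock(&open_mutex);
	if (disk_dead)
		printf("release: disk already dead, nothing to do\n");
	else
		printf("release: %d partitions still present\n", nr_partitions);
	pthread_mutex_unlock(&open_mutex);
}

int main(void)
{
	release_disk();
	del_disk();
	release_disk();
	return 0;
}

A releaser that grabs the mutex either completes before the teardown starts or observes the dead state; it can no longer interleave between the dead-marking and the partition drop.
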
@@ -3422,6 +3422,7 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
 /**
  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
  * @device: DRBD device.
+ * @peer_device: Peer DRBD device.
  *
  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
  */
@@ -3448,6 +3449,7 @@ int drbd_bmio_set_n_write(struct drbd_device *device,
 /**
  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
  * @device: DRBD device.
+ * @peer_device: Peer DRBD device.
  *
  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
  */
@@ -3501,6 +3503,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
  * @done: callback to be called after the bitmap IO was performed
  * @why: Descriptive text of the reason for doing the IO
  * @flags: Bitmap flags
+ * @peer_device: Peer DRBD device.
  *
  * While IO on the bitmap happens we freeze application IO thus we ensure
  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
@@ -3549,6 +3552,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
  * @io_fn: IO callback to be called when bitmap IO is possible
  * @why: Descriptive text of the reason for doing the IO
  * @flags: Bitmap flags
+ * @peer_device: Peer DRBD device.
  *
  * freezes application IO while that the actual IO operations runs. This
  * functions MAY NOT be called from worker context.
@@ -48,6 +48,9 @@
 #define UBLK_MINORS	(1U << MINORBITS)
 
+/* private ioctl command mirror */
+#define UBLK_CMD_DEL_DEV_ASYNC	_IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+
 /* All UBLK_F_* have to be included into UBLK_F_ALL */
 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
 		| UBLK_F_URING_CMD_COMP_IN_TASK \
@@ -2903,7 +2906,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 	case UBLK_CMD_DEL_DEV:
 		ret = ublk_ctrl_del_dev(&ub, true);
 		break;
-	case UBLK_U_CMD_DEL_DEV_ASYNC:
+	case UBLK_CMD_DEL_DEV_ASYNC:
 		ret = ublk_ctrl_del_dev(&ub, false);
 		break;
 	case UBLK_CMD_GET_QUEUE_AFFINITY:
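
Some context for the one-line case change: the new UBLK_CMD_DEL_DEV_ASYNC define mirrors the full ioctl code through _IOC_NR(), which strongly suggests the surrounding switch in ublk_ctrl_uring_cmd() dispatches on _IOC_NR(cmd_op) rather than on the full 32-bit ioctl value. A case label carrying the _IOWR()-encoded UBLK_U_CMD_DEL_DEV_ASYNC could therefore never match, so the async delete command was never handled by that branch. The stand-alone Linux user-space sketch below uses a hypothetical command (DEMO_*, made-up opcode 0x05, not a real ublk opcode) to show why comparing a full ioctl code against its _IOC_NR() number always fails:

#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical payload struct; only the _IOWR()/_IOC_NR() relationship matters. */
struct demo_cmd {
	uint64_t addr;
	uint32_t len;
};

#define DEMO_U_CMD_DEL_DEV_ASYNC	_IOWR('u', 0x05, struct demo_cmd)
#define DEMO_CMD_DEL_DEV_ASYNC		_IOC_NR(DEMO_U_CMD_DEL_DEV_ASYNC)

int main(void)
{
	unsigned int cmd_op = DEMO_U_CMD_DEL_DEV_ASYNC;

	printf("full ioctl value: 0x%x, _IOC_NR(): 0x%x\n",
	       cmd_op, _IOC_NR(cmd_op));

	/* _IOC_NR() strips the direction, size and type bits, so only the
	 * plain command number can match here.
	 */
	switch (_IOC_NR(cmd_op)) {
	case DEMO_U_CMD_DEL_DEV_ASYNC:	/* full 32-bit value: never taken */
		printf("matched the full ioctl value\n");
		break;
	case DEMO_CMD_DEL_DEV_ASYNC:	/* plain number: this one matches */
		printf("matched the _IOC_NR() mirror\n");
		break;
	}
	return 0;
}

Keeping a *_CMD_* mirror next to the *_U_CMD_* definition, as the hunk above does, keeps the case labels in the same numeric space as the value actually being switched on.
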
@@ -1876,12 +1876,18 @@ static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
 		struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
 {
 	u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
+	u8 guard_type;
 
 	/* no support for storage tag formats right now */
 	if (nvme_elbaf_sts(elbaf))
 		return;
 
-	head->guard_type = nvme_elbaf_guard_type(elbaf);
+	guard_type = nvme_elbaf_guard_type(elbaf);
+	if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) &&
+	    guard_type == NVME_NVM_NS_QTYPE_GUARD)
+		guard_type = nvme_elbaf_qualified_guard_type(elbaf);
+
+	head->guard_type = guard_type;
 	switch (head->guard_type) {
 	case NVME_NVM_NS_64B_GUARD:
 		head->pi_size = sizeof(struct crc64_pi_tuple);
@@ -1403,10 +1403,10 @@ static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
 		tok = &opt_tokens[idx];
 		if (tok->token == NVMF_OPT_ERR)
 			continue;
-		seq_puts(seq_file, ",");
+		seq_putc(seq_file, ',');
 		seq_puts(seq_file, tok->pattern);
 	}
-	seq_puts(seq_file, "\n");
+	seq_putc(seq_file, '\n');
 }
 
 static int nvmf_dev_show(struct seq_file *seq_file, void *private)
@@ -863,7 +863,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	nvme_start_request(req);
 	return BLK_STS_OK;
 out_unmap_data:
-	nvme_unmap_data(dev, req);
+	if (blk_rq_nr_phys_segments(req))
+		nvme_unmap_data(dev, req);
 out_free_cmd:
 	nvme_cleanup_cmd(req);
 	return ret;
@@ -1309,7 +1310,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
 	dev_warn(dev->ctrl.device,
 		 "Does your device have a faulty power saving mode enabled?\n");
 	dev_warn(dev->ctrl.device,
-		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
+		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n");
 }
 
 static enum blk_eh_timer_return nvme_timeout(struct request *req)
@@ -2968,6 +2969,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 		return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
 	}
 
+	/*
+	 * NVMe SSD drops off the PCIe bus after system idle
+	 * for 10 hours on a Lenovo N60z board.
+	 */
+	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
+		return NVME_QUIRK_NO_APST;
+
 	return 0;
 }
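
The out_unmap_data change above is the "requests without payloads" cleanup fix from the pull request: a request with no mapped data must not be unmapped on the error path, which is what the new blk_rq_nr_phys_segments() check enforces. As a generic user-space illustration of that unwind rule (hypothetical names, malloc/free standing in for the DMA mapping), it boils down to only undoing the steps that actually ran:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical request: payload_len == 0 means nothing was ever mapped. */
struct demo_req {
	size_t payload_len;
	void *mapped;
};

static int map_data(struct demo_req *req)
{
	req->mapped = malloc(req->payload_len);
	return req->mapped ? 0 : -ENOMEM;
}

static void unmap_data(struct demo_req *req)
{
	free(req->mapped);
	req->mapped = NULL;
}

/* Only undo what actually happened: unmap solely when data was mapped,
 * mirroring the blk_rq_nr_phys_segments() guard added above.
 */
static int prep_req(struct demo_req *req, int fail_late)
{
	int ret = 0;

	if (req->payload_len) {
		ret = map_data(req);
		if (ret)
			return ret;
	}

	if (fail_late) {		/* some later setup step failed */
		ret = -EIO;
		goto out_unmap;
	}
	return 0;

out_unmap:
	if (req->payload_len)		/* skip cleanup for payload-less requests */
		unmap_data(req);
	return ret;
}

int main(void)
{
	struct demo_req no_payload = { 0 };
	struct demo_req with_payload = { .payload_len = 512 };

	printf("no payload:   %d\n", prep_req(&no_payload, 1));
	printf("with payload: %d\n", prep_req(&with_payload, 1));
	return 0;
}

The same condition guards both the setup and the error path, so the two stay symmetric even when the request carries no payload.
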
@@ -233,13 +233,12 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
 {
 	struct nvme_ns_head *head = dev_to_ns_head(dev);
 	struct gendisk *disk = dev_to_disk(dev);
-	struct block_device *bdev = disk->part0;
 	int ret;
 
-	if (nvme_disk_is_ns_head(bdev->bd_disk))
+	if (nvme_disk_is_ns_head(disk))
 		ret = ns_head_update_nuse(head);
 	else
-		ret = ns_update_nuse(bdev->bd_disk->private_data);
+		ret = ns_update_nuse(disk->private_data);
 	if (ret)
 		return ret;
@@ -485,6 +485,9 @@
 	NVME_ID_NS_NVM_STS_MASK		= 0x7f,
 	NVME_ID_NS_NVM_GUARD_SHIFT	= 7,
 	NVME_ID_NS_NVM_GUARD_MASK	= 0x3,
+	NVME_ID_NS_NVM_QPIF_SHIFT	= 9,
+	NVME_ID_NS_NVM_QPIF_MASK	= 0xf,
+	NVME_ID_NS_NVM_QPIFS		= 1 << 3,
 };
 
 static inline __u8 nvme_elbaf_sts(__u32 elbaf)
@@ -497,6 +500,11 @@ static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
 	return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
 }
 
+static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
+{
+	return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
+}
+
 struct nvme_id_ctrl_nvm {
 	__u8	vsl;
 	__u8	wzsl;
@@ -576,6 +584,7 @@ enum {
 	NVME_NVM_NS_16B_GUARD	= 0,
 	NVME_NVM_NS_32B_GUARD	= 1,
 	NVME_NVM_NS_64B_GUARD	= 2,
+	NVME_NVM_NS_QTYPE_GUARD	= 3,
 };
 
 static inline __u8 nvme_lbaf_index(__u8 flbas)
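
The new constants and the nvme_elbaf_qualified_guard_type() helper carve two fields out of the same extended LBA format (elbaf) word: the existing guard type in bits 7-8 and the qualified protection information format in bits 9-12, with NVME_ID_NS_NVM_QPIFS as the capability bit tested against nvm->pic in the core change above. A small stand-alone decode of a made-up elbaf value (the shifts, masks and helpers are re-declared locally so the snippet compiles outside the kernel tree) might look like this:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the shift/mask values from the hunk above. */
#define GUARD_SHIFT	7
#define GUARD_MASK	0x3
#define QPIF_SHIFT	9
#define QPIF_MASK	0xf

#define NS_64B_GUARD	2
#define NS_QTYPE_GUARD	3	/* "look at the qualified field instead" */

static uint8_t elbaf_guard_type(uint32_t elbaf)
{
	return (elbaf >> GUARD_SHIFT) & GUARD_MASK;
}

static uint8_t elbaf_qualified_guard_type(uint32_t elbaf)
{
	return (elbaf >> QPIF_SHIFT) & QPIF_MASK;
}

int main(void)
{
	/* Made-up elbaf: legacy guard field = 3 (QTYPE), qualified field = 2. */
	uint32_t elbaf = (3u << GUARD_SHIFT) | (2u << QPIF_SHIFT);
	uint8_t guard = elbaf_guard_type(elbaf);
	int qpifs_supported = 1;	/* pretend the QPIFS capability bit is set */

	/* Same selection rule as in the nvme_configure_pi_elbas() hunk above. */
	if (qpifs_supported && guard == NS_QTYPE_GUARD)
		guard = elbaf_qualified_guard_type(elbaf);

	printf("effective guard type: %u (2 means the 64-bit guard format)\n",
	       (unsigned)guard);
	return 0;
}

With QPIFS clear, or with any guard-type value other than QTYPE, the condition fails and the legacy guard field is used exactly as before.
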