Commit 44ef1604 authored by Martin K. Petersen's avatar Martin K. Petersen

Merge patch series "smartpqi updates"

Don Brace <don.brace@microchip.com> says:

These patches are based on Martin Petersen's 6.4/scsi-queue tree
  https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
  6.4/scsi-queue

This set of changes consists of:

 * Map entire BAR 0.  The driver was mapping up to and including the
   controller registers, but not all of BAR 0.

 * Add PCI IDs to support new controllers.

 * Clean up some code by removing unnecessary NULL checks.  This cleanup is
   a result of a Coverity report.

 * Correct a rare memory leak whenever pqi_sas_port_add_rphy() returns an
   error. Suggested-by: Yang Yingliang <yangyingliang@huawei.com>

 * Remove atomic operations on variable raid_bypass_cnt. Accuracy is not
   required for driver operation. Change type from atomic_t to unsigned
   int.

 * Correct a rare drive hot-plug removal issue where we get a NULL
   io_request. We added a check for this condition.

 * Turn on NCQ priority for AIO requests to disks comprising RAID devices.

 * Correct byte aligned writew() operations on some ARM servers. Changed
   the writew() to two writeb() operations.

 * Change how the driver checks for a sanitize operation in progress.  We
   were using TEST UNIT READY. We removed the TEST UNIT READY code and are
   now using the controller's firmware information in order to avoid issues
   caused by drives failing to complete TEST UNIT READY.

 * Some customers have been requesting that we add the NUMA node to
   /sys/block/sd<scsi device>/device like the nvme driver does.

 * Update the copyright information to match the current year.

 * Bump the driver version to 2.1.22-040.

Link: https://lore.kernel.org/r/20230428153712.297638-1-don.brace@microchip.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 79c67c54 fcb40511
# #
# Kernel configuration file for the SMARTPQI # Kernel configuration file for the SMARTPQI
# #
# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries # Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation # Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation # Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc. # Copyright (c) 2016 PMC-Sierra, Inc.
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* /*
* driver for Microchip PQI-based storage controllers * driver for Microchip PQI-based storage controllers
* Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc. * Copyright (c) 2016 PMC-Sierra, Inc.
* *
...@@ -1108,6 +1108,7 @@ struct pqi_scsi_dev { ...@@ -1108,6 +1108,7 @@ struct pqi_scsi_dev {
u8 volume_offline : 1; u8 volume_offline : 1;
u8 rescan : 1; u8 rescan : 1;
u8 ignore_device : 1; u8 ignore_device : 1;
u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */ bool aio_enabled; /* only valid for physical disks */
bool in_remove; bool in_remove;
bool device_offline; bool device_offline;
...@@ -1147,7 +1148,7 @@ struct pqi_scsi_dev { ...@@ -1147,7 +1148,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE]; atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
atomic_t raid_bypass_cnt; unsigned int raid_bypass_cnt;
}; };
/* VPD inquiry pages */ /* VPD inquiry pages */
...@@ -1357,6 +1358,7 @@ struct pqi_ctrl_info { ...@@ -1357,6 +1358,7 @@ struct pqi_ctrl_info {
u32 max_write_raid_5_6; u32 max_write_raid_5_6;
u32 max_write_raid_1_10_2drive; u32 max_write_raid_1_10_2drive;
u32 max_write_raid_1_10_3drive; u32 max_write_raid_1_10_3drive;
int numa_node;
struct list_head scsi_device_list; struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock; spinlock_t scsi_device_list_lock;
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* driver for Microchip PQI-based storage controllers * driver for Microchip PQI-based storage controllers
* Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc. * Copyright (c) 2016 PMC-Sierra, Inc.
* *
...@@ -33,11 +33,11 @@ ...@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP #define BUILD_TIMESTAMP
#endif #endif
#define DRIVER_VERSION "2.1.20-035" #define DRIVER_VERSION "2.1.22-040"
#define DRIVER_MAJOR 2 #define DRIVER_MAJOR 2
#define DRIVER_MINOR 1 #define DRIVER_MINOR 1
#define DRIVER_RELEASE 20 #define DRIVER_RELEASE 22
#define DRIVER_REVISION 35 #define DRIVER_REVISION 40
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \ #define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")" DRIVER_VERSION BUILD_TIMESTAMP ")"
...@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) ...@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
writeb(status, ctrl_info->soft_reset_status); writeb(status, ctrl_info->soft_reset_status);
} }
/*
 * Decide whether an I/O command should be issued with high (NCQ) priority.
 *
 * Returns true only when NCQ priority is enabled on the device, the
 * request carries the real-time I/O priority class, and the CDB opcode
 * is one of the READ/WRITE variants; all other commands stay at normal
 * priority.
 */
static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	int priority_class;

	/* NCQ priority applies only when enabled for this device. */
	if (!device->ncq_prio_enable)
		return false;

	priority_class = IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
	if (priority_class != IOPRIO_CLASS_RT)
		return false;

	/* Elevate only read/write CDBs. */
	switch (scmd->cmnd[0]) {
	case WRITE_16:
	case READ_16:
	case WRITE_12:
	case READ_12:
	case WRITE_10:
	case READ_10:
	case WRITE_6:
	case READ_6:
		return true;
	default:
		return false;
	}
}
static int pqi_map_single(struct pci_dev *pci_dev, static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer, struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction) size_t buffer_length, enum dma_data_direction data_direction)
...@@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, ...@@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb; cdb = request->cdb;
switch (cmd) { switch (cmd) {
case TEST_UNIT_READY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = TEST_UNIT_READY;
break;
case INQUIRY: case INQUIRY:
request->data_direction = SOP_READ_FLAG; request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY; cdb[0] = INQUIRY;
...@@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info * ...@@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *
} }
} }
pqi_reinit_io_request(io_request); if (io_request)
pqi_reinit_io_request(io_request);
return io_request; return io_request;
} }
...@@ -1588,6 +1615,7 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, ...@@ -1588,6 +1615,7 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct pqi_scsi_dev *device,
...@@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, ...@@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
PQI_DEVICE_NCQ_PRIO_SUPPORTED); PQI_DEVICE_NCQ_PRIO_SUPPORTED);
device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
return 0; return 0;
} }
...@@ -1681,7 +1711,7 @@ static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, ...@@ -1681,7 +1711,7 @@ static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
/* /*
* Prevent adding drive to OS for some corner cases such as a drive * Prevent adding drive to OS for some corner cases such as a drive
* undergoing a sanitize operation. Some OSes will continue to poll * undergoing a sanitize (erase) operation. Some OSes will continue to poll
* the drive until the sanitize completes, which can take hours, * the drive until the sanitize completes, which can take hours,
* resulting in long bootup delays. Commands such as TUR, READ_CAP * resulting in long bootup delays. Commands such as TUR, READ_CAP
* are allowed, but READ/WRITE cause check condition. So the OS * are allowed, but READ/WRITE cause check condition. So the OS
...@@ -1689,73 +1719,9 @@ static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, ...@@ -1689,73 +1719,9 @@ static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
* Note: devices that have completed sanitize must be re-enabled * Note: devices that have completed sanitize must be re-enabled
* using the management utility. * using the management utility.
*/ */
static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info, static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
struct pqi_scsi_dev *device)
{ {
u8 scsi_status; return device->erase_in_progress;
int rc;
enum dma_data_direction dir;
char *buffer;
int buffer_length = 64;
size_t sense_data_length;
struct scsi_sense_hdr sshdr;
struct pqi_raid_path_request request;
struct pqi_raid_error_info error_info;
bool offline = false; /* Assume keep online */
/* Do not check controllers. */
if (pqi_is_hba_lunid(device->scsi3addr))
return false;
/* Do not check LVs. */
if (pqi_is_logical_device(device))
return false;
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return false; /* Assume not offline */
/* Check for SANITIZE in progress using TUR */
rc = pqi_build_raid_path_request(ctrl_info, &request,
TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
buffer_length, 0, &dir);
if (rc)
goto out; /* Assume not offline */
memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
if (rc)
goto out; /* Assume not offline */
scsi_status = error_info.status;
sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
if (sense_data_length == 0)
sense_data_length =
get_unaligned_le16(&error_info.response_data_length);
if (sense_data_length) {
if (sense_data_length > sizeof(error_info.data))
sense_data_length = sizeof(error_info.data);
/*
* Check for sanitize in progress: asc:0x04, ascq: 0x1b
*/
if (scsi_status == SAM_STAT_CHECK_CONDITION &&
scsi_normalize_sense(error_info.data,
sense_data_length, &sshdr) &&
sshdr.sense_key == NOT_READY &&
sshdr.asc == 0x04 &&
sshdr.ascq == 0x1b) {
device->device_offline = true;
offline = true;
goto out; /* Keep device offline */
}
}
out:
kfree(buffer);
return offline;
} }
static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
...@@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) ...@@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!pqi_is_supported_device(device)) if (!pqi_is_supported_device(device))
continue; continue;
/* Do not present disks that the OS cannot fully probe */
if (pqi_keep_device_offline(ctrl_info, device))
continue;
/* Gather information about the device. */ /* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device, id_phys); rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) { if (rc == -ENOMEM) {
...@@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) ...@@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue; continue;
} }
/* Do not present disks that the OS cannot fully probe. */
if (pqi_keep_device_offline(device))
continue;
pqi_assign_bus_target_lun(device); pqi_assign_bus_target_lun(device);
if (device->is_physical_device) { if (device->is_physical_device) {
...@@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request, ...@@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd); pqi_scsi_done(scmd);
} }
static int pqi_raid_submit_scsi_cmd_with_io_request( static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group) struct pqi_queue_group *queue_group, bool io_high_prio)
{ {
int rc; int rc;
size_t cdb_length; size_t cdb_length;
struct pqi_io_request *io_request;
struct pqi_raid_path_request *request; struct pqi_raid_path_request *request;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY;
io_request->io_complete_callback = pqi_raid_io_complete; io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd; io_request->scmd = scmd;
...@@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( ...@@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id); put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id; request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
...@@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, ...@@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group) struct pqi_queue_group *queue_group)
{ {
struct pqi_io_request *io_request; bool io_high_prio;
io_request = pqi_alloc_io_request(ctrl_info, scmd); io_high_prio = pqi_is_io_high_priority(device, scmd);
if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY;
return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
device, scmd, queue_group);
} }
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
...@@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request, ...@@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd); pqi_scsi_done(scmd);
} }
static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
bool io_high_prio;
int priority_class;
io_high_prio = false;
if (device->ncq_prio_enable) {
priority_class =
IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
if (priority_class == IOPRIO_CLASS_RT) {
/* Set NCQ priority for read/write commands. */
switch (scmd->cmnd[0]) {
case WRITE_16:
case READ_16:
case WRITE_12:
case READ_12:
case WRITE_10:
case READ_10:
case WRITE_6:
case READ_6:
io_high_prio = true;
break;
}
}
}
return io_high_prio;
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group) struct pqi_queue_group *queue_group)
{ {
bool io_high_prio; bool io_high_prio;
io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd); io_high_prio = pqi_is_io_high_priority(device, scmd);
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
scmd->cmnd, scmd->cmd_len, queue_group, NULL, scmd->cmnd, scmd->cmd_len, queue_group, NULL,
...@@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, ...@@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request; struct pqi_aio_path_request *request;
struct pqi_scsi_dev *device; struct pqi_scsi_dev *device;
device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, scmd); io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request) if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
io_request->io_complete_callback = pqi_aio_io_complete; io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd; io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass; io_request->raid_bypass = raid_bypass;
...@@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, ...@@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio; request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id); put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id; request->error_index = request->request_id;
device = scmd->device->hostdata;
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number); put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb)) if (cdb_length > sizeof(request->cdb))
...@@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm ...@@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true; raid_bypassed = true;
atomic_inc(&device->raid_bypass_cnt); device->raid_bypass_cnt++;
} }
} }
if (!raid_bypassed) if (!raid_bypassed)
...@@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, ...@@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev; struct scsi_device *sdev;
struct pqi_scsi_dev *device; struct pqi_scsi_dev *device;
unsigned long flags; unsigned long flags;
int raid_bypass_cnt; unsigned int raid_bypass_cnt;
sdev = to_scsi_device(dev); sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host); ctrl_info = shost_to_hba(sdev->host);
...@@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, ...@@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV; return -ENODEV;
} }
raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); raid_bypass_cnt = device->raid_bypass_cnt;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
...@@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, ...@@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return -ENODEV; return -ENODEV;
} }
if (!device->ncq_prio_support || if (!device->ncq_prio_support) {
!device->is_physical_device) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -EINVAL; return -EINVAL;
} }
...@@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, ...@@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return strlen(buf); return strlen(buf);
} }
static ssize_t pqi_numa_node_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
struct scsi_device *sdev;
struct pqi_ctrl_info *ctrl_info;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
...@@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); ...@@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644, static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
static struct attribute *pqi_sdev_attrs[] = { static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr, &dev_attr_lunid.attr,
...@@ -7398,6 +7348,7 @@ static struct attribute *pqi_sdev_attrs[] = { ...@@ -7398,6 +7348,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_level.attr, &dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr, &dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr, &dev_attr_sas_ncq_prio_enable.attr,
&dev_attr_numa_node.attr,
NULL NULL
}; };
...@@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, ...@@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
features_requested_iomem_addr + features_requested_iomem_addr +
(le16_to_cpu(firmware_features->num_elements) * 2) + (le16_to_cpu(firmware_features->num_elements) * 2) +
sizeof(__le16); sizeof(__le16);
writew(PQI_FIRMWARE_FEATURE_MAXIMUM, writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
host_max_known_feature_iomem_addr); writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
} }
return pqi_config_table_update(ctrl_info, return pqi_config_table_update(ctrl_info,
...@@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) ...@@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0), ctrl_info->pci_dev, 0),
sizeof(struct pqi_ctrl_registers)); pci_resource_len(ctrl_info->pci_dev, 0));
if (!ctrl_info->iomem_base) { if (!ctrl_info->iomem_base) {
dev_err(&ctrl_info->pci_dev->dev, dev_err(&ctrl_info->pci_dev->dev,
"failed to map memory for controller registers\n"); "failed to map memory for controller registers\n");
...@@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev, ...@@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"failed to allocate controller info block\n"); "failed to allocate controller info block\n");
return -ENOMEM; return -ENOMEM;
} }
ctrl_info->numa_node = node;
ctrl_info->pci_dev = pci_dev; ctrl_info->pci_dev = pci_dev;
...@@ -9927,6 +9879,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { ...@@ -9927,6 +9879,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f0c, 0x3161) 0x1f0c, 0x3161)
}, },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0804)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0805)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0806)
},
{ {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445) 0x1cf2, 0x5445)
...@@ -9963,6 +9927,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { ...@@ -9963,6 +9927,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x544f) 0x1cf2, 0x544f)
}, },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x54da)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x54db)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x54dc)
},
{ {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27) 0x1cf2, 0x0b27)
...@@ -10015,6 +9991,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { ...@@ -10015,6 +9991,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0623) PCI_VENDOR_ID_LENOVO, 0x0623)
}, },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1014, 0x0718)
},
{ {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000) 0x1e93, 0x1000)
...@@ -10027,6 +10007,50 @@ static const struct pci_device_id pqi_pci_id_table[] = { ...@@ -10027,6 +10007,50 @@ static const struct pci_device_id pqi_pci_id_table[] = {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1002) 0x1e93, 0x1002)
}, },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1005)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1001)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1002)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1003)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1004)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1005)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1006)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1007)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1008)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x1009)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x100a)
},
{ {
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID) PCI_ANY_ID, PCI_ANY_ID)
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* driver for Microchip PQI-based storage controllers * driver for Microchip PQI-based storage controllers
* Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc. * Copyright (c) 2016 PMC-Sierra, Inc.
* *
...@@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, ...@@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify; identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address; identify->sas_address = pqi_sas_port->sas_address;
identify->phy_identifier = pqi_sas_port->device->phy_id;
identify->initiator_port_protocols = SAS_PROTOCOL_ALL; identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
identify->target_port_protocols = SAS_PROTOCOL_STP; identify->target_port_protocols = SAS_PROTOCOL_STP;
if (pqi_sas_port->device) { switch (pqi_sas_port->device->device_type) {
identify->phy_identifier = pqi_sas_port->device->phy_id; case SA_DEVICE_TYPE_SAS:
switch (pqi_sas_port->device->device_type) { case SA_DEVICE_TYPE_SES:
case SA_DEVICE_TYPE_SAS: case SA_DEVICE_TYPE_NVME:
case SA_DEVICE_TYPE_SES: identify->target_port_protocols = SAS_PROTOCOL_SSP;
case SA_DEVICE_TYPE_NVME: break;
identify->target_port_protocols = SAS_PROTOCOL_SSP; case SA_DEVICE_TYPE_EXPANDER_SMP:
break; identify->target_port_protocols = SAS_PROTOCOL_SMP;
case SA_DEVICE_TYPE_EXPANDER_SMP: break;
identify->target_port_protocols = SAS_PROTOCOL_SMP; case SA_DEVICE_TYPE_SATA:
break; default:
case SA_DEVICE_TYPE_SATA: break;
default:
break;
}
} }
return sas_rphy_add(rphy); return sas_rphy_add(rphy);
...@@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, ...@@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy); rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
if (rc) if (rc)
goto free_sas_port; goto free_sas_rphy;
return 0; return 0;
free_sas_rphy:
sas_rphy_free(rphy);
free_sas_port: free_sas_port:
pqi_free_sas_port(pqi_sas_port); pqi_free_sas_port(pqi_sas_port);
device->sas_port = NULL; device->sas_port = NULL;
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* driver for Microchip PQI-based storage controllers * driver for Microchip PQI-based storage controllers
* Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc. * Copyright (c) 2016 PMC-Sierra, Inc.
* *
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* /*
* driver for Microchip PQI-based storage controllers * driver for Microchip PQI-based storage controllers
* Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc. * Copyright (c) 2016 PMC-Sierra, Inc.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment