Commit debd52a0 authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Three small bug fixes (barrier elimination, memory leak on unload,
  spinlock recursion) and a technical enhancement left over from the
  merge window: the TCMU read length support is required for tape
  device reads when the length of the read is greater than the tape
  block size"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: scsi_debug: Fix memory leak on module unload
  scsi: qla2xxx: Spinlock recursion in qla_target
  scsi: ipr: Eliminate duplicate barriers
  scsi: target: tcmu: add read length support
parents c92067ae 52ab9768
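
The TCMU item is the user-visible change: a userspace-backed device (for example an emulated tape with a 512-byte block size answering a 4 KB READ) can now complete a command with fewer bytes than requested instead of failing it, and the target core reports the difference to the initiator as an underflow residual. The ipr, qla2xxx and scsi_debug hunks below are the three small fixes; the target_core_user hunks implement the read length support.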
drivers/scsi/ipr.c

@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 		ioa_cfg->hrrq[i].allow_interrupts = 0;
 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
 	}
-	wmb();
 
 	/* Set interrupt mask to stop all new interrupts */
 	if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
 		ioa_cfg->hrrq[i].allow_interrupts = 1;
 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
 	}
-	wmb();
 	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
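
Both removed wmb() calls sat between dropping the per-hrrq locks and the writel()/writeq() that updates the adapter's interrupt mask. A sketch of why they were redundant (the ordering argument is the kernel's documented MMIO accessor rule; the ~0 mask write is ipr's interrupt masking, simplified to the 32-bit case):

	ioa_cfg->hrrq[i].allow_interrupts = 0;
	spin_unlock(&ioa_cfg->hrrq[i]._lock);	/* unlock has release semantics */
	/* wmb() removed: writel() already orders every prior normal memory
	 * store ahead of the MMIO store, so the barrier added nothing. */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);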
drivers/scsi/qla2xxx/qla_target.c

@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
 	struct qla_tgt *tgt = sess->tgt;
-	struct qla_hw_data *ha = sess->vha->hw;
 	unsigned long flags;
 
 	if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 		return;
 	}
 
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	if (sess->deleted == QLA_SESS_DELETED)
 		sess->logout_on_delete = 0;
 
+	spin_lock_irqsave(&sess->vha->work_lock, flags);
 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 		return;
 	}
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
 	sess->disc_state = DSC_DELETE_PEND;
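
The dropped ha->tgt.sess_lock usage is what recursed: qlt_schedule_sess_for_deletion() can be reached from paths that already hold sess_lock, and Linux spinlocks are not recursive, so re-acquiring it self-deadlocked. The fix guards only the sess->deleted transition, using the narrower vha->work_lock. Comment-only sketch of the failure mode (the call chain is illustrative, not taken from the patch):

	/* caller */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	/* ...eventually reaches... */
	qlt_schedule_sess_for_deletion(sess);
	/* ...whose old body then did: */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);	/* second acquire: spins forever */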
drivers/scsi/scsi_debug.c

@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
 	int k = sdebug_add_host;
 
 	stop_all_queued();
-	free_all_queued();
 	for (; k; k--)
 		sdebug_remove_adapter();
+	free_all_queued();
 	driver_unregister(&sdebug_driverfs_driver);
 	bus_unregister(&pseudo_lld_bus);
 	root_device_unregister(pseudo_primary);
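
The leak was an ordering bug in the unload path: free_all_queued() releases the deferred per-command state, but it ran before the adapters were torn down, so state still tied to commands retired during sdebug_remove_adapter() was never reclaimed. The new order, annotated (comments are editorial, not from the patch):

	stop_all_queued();		/* stop timers and deferred completions */
	for (; k; k--)
		sdebug_remove_adapter();	/* remove hosts; may retire in-flight commands */
	free_all_queued();		/* nothing can queue anymore; safe to free */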
drivers/target/target_core_user.c

@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-			     bool bidi)
+			     bool bidi, uint32_t read_len)
 {
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 	for_each_sg(data_sg, sg, data_nents, i) {
 		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
-		while (sg_remaining > 0) {
+		while (sg_remaining > 0 && read_len > 0) {
 			if (block_remaining == 0) {
 				if (from)
 					kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 			}
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
+			if (read_len < copy_bytes)
+				copy_bytes = read_len;
 			offset = DATA_BLOCK_SIZE - block_remaining;
 			tcmu_flush_dcache_range(from, copy_bytes);
 			memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 			sg_remaining -= copy_bytes;
 			block_remaining -= copy_bytes;
+			read_len -= copy_bytes;
 		}
 		kunmap_atomic(to - sg->offset);
+		if (read_len == 0)
+			break;
 	}
 
 	if (from)
 		kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
 {
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	struct tcmu_dev *udev = cmd->tcmu_dev;
+	bool read_len_valid = false;
+	uint32_t read_len = se_cmd->data_length;
 
 	/*
 	 * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
 			cmd->se_cmd);
 		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
-	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+		goto done;
+	}
+
+	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+		read_len_valid = true;
+		if (entry->rsp.read_len < read_len)
+			read_len = entry->rsp.read_len;
+	}
+
+	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
 		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
-	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		if (!read_len_valid)
+			goto done;
+		else
+			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+	}
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
 		/* Get Data-In buffer before clean up */
-		gather_data_area(udev, cmd, true);
+		gather_data_area(udev, cmd, true, read_len);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		gather_data_area(udev, cmd, false);
+		gather_data_area(udev, cmd, false, read_len);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		/* TODO: */
 	} else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
 		se_cmd->data_direction);
 	}
 
-	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+	if (read_len_valid) {
+		pr_debug("read_len = %d\n", read_len);
+		target_complete_cmd_with_length(cmd->se_cmd,
+				entry->rsp.scsi_status, read_len);
+	} else
+		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
 	cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
 	/* Initialise the mailbox of the ring buffer */
 	mb = udev->mb_addr;
 	mb->version = TCMU_MAILBOX_VERSION;
-	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
 	mb->cmdr_off = CMDR_OFF;
 	mb->cmdr_size = udev->cmdr_size;
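
Kernel-side rules worth noting: read_len starts at se_cmd->data_length and is only ever clamped down, so userspace can shorten a read but never extend one; a CHECK CONDITION paired with a valid read_len is treated as a normal short read (SCF_TREAT_READ_AS_NORMAL) so the partial data still reaches the initiator; and completion goes through target_complete_cmd_with_length(), whose effect for a short read is roughly (a sketch of the core helper's behavior, not its exact source):

	if (read_len < se_cmd->data_length) {
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count = se_cmd->data_length - read_len;
		se_cmd->data_length = read_len;
	}
	target_complete_cmd(se_cmd, scsi_status);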
include/uapi/linux/target_core_user.h

@@ -44,6 +44,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 
 struct tcmu_mailbox {
 	__u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
 	__u16 cmd_id;
 	__u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN 0x2
 	__u8 uflags;
 } __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
 		__u8 scsi_status;
 		__u8 __pad1;
 		__u16 __pad2;
-		__u32 __pad3;
+		__u32 read_len;
 		char sense_buffer[TCMU_SENSE_BUFFERSIZE];
 	} rsp;
 };
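
For userspace, the ABI change is additive and opt-in: the kernel advertises the capability via TCMU_MAILBOX_FLAG_CAP_READ_LEN, and a handler signals a short read per command by setting TCMU_UFLAG_READ_LEN and filling rsp.read_len, which reuses the old __pad3 slot so the entry layout is unchanged. A minimal sketch against this header (the helper and its arguments are hypothetical; ring management is elided):

	#include <stdbool.h>
	#include <stdint.h>
	#include <linux/target_core_user.h>

	/* Hypothetical helper for a TCMU userspace handler: complete a READ
	 * that returned fewer bytes than the initiator requested. */
	static void complete_short_read(struct tcmu_mailbox *mb,
					struct tcmu_cmd_entry *ent,
					uint32_t requested, uint32_t valid_bytes)
	{
		bool cap = mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN;

		ent->rsp.scsi_status = 0;	/* SAM_STAT_GOOD */
		if (cap && valid_bytes < requested) {
			/* opt in per command and report the valid byte count */
			ent->hdr.uflags |= TCMU_UFLAG_READ_LEN;
			ent->rsp.read_len = valid_bytes;
		}
		/* ...then advance the ring's cmd_tail and poke the uio fd as usual. */
	}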