Commit 1733348b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "Mostly minor items this time around, the most notable being a FILEIO
  backend change to enforce hw_max_sectors based upon the current
  block_size to address a bug where large-sized I/Os (> 1M) were being
  rejected"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  target: Remove extra percpu_ref_init
  target/file: Update hw_max_sectors based on current block_size
  iser-target: Move INIT_WORK setup into isert_create_device_ib_res
  iscsi-target: Fix incorrect np->np_thread NULL assignment
  qla2xxx: Fix schedule_delayed_work() for target timeout calculations
  iser-target: fix error return code in isert_create_device_ib_res()
  iscsi-target: Fix-up all zero data-length CDBs with R/W_BIT set
  target: Remove write-only stats fields and lock from struct se_node_acl
  iscsi-target: return -EINVAL on oversized configfs parameter
parents a8472b4b dcd21199
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
     isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
         cq_desc[i].device = device;
         cq_desc[i].cq_index = i;
 
+        INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
         device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                         isert_cq_rx_callback,
                         isert_cq_event_callback,
                         (void *)&cq_desc[i],
                         ISER_MAX_RX_CQ_LEN, i);
-        if (IS_ERR(device->dev_rx_cq[i]))
+        if (IS_ERR(device->dev_rx_cq[i])) {
+            ret = PTR_ERR(device->dev_rx_cq[i]);
+            device->dev_rx_cq[i] = NULL;
             goto out_cq;
+        }
 
+        INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
         device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                         isert_cq_tx_callback,
                         isert_cq_event_callback,
                         (void *)&cq_desc[i],
                         ISER_MAX_TX_CQ_LEN, i);
-        if (IS_ERR(device->dev_tx_cq[i]))
+        if (IS_ERR(device->dev_tx_cq[i])) {
+            ret = PTR_ERR(device->dev_tx_cq[i]);
+            device->dev_tx_cq[i] = NULL;
             goto out_cq;
+        }
 
-        if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+        ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+        if (ret)
             goto out_cq;
 
-        if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+        ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+        if (ret)
             goto out_cq;
     }
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
     struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-    INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
     queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
     struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-    INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
     queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
...
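The four hunks above move INIT_WORK() out of the completion callbacks and into isert_create_device_ib_res(): a work item must be initialized once before it is ever queued, and re-initializing it from the hot path can clobber a work_struct that is still pending or running. A minimal kernel-style sketch of the pattern (generic names, not the actual iser-target structures):

    #include <linux/workqueue.h>

    struct my_cq_desc {
        struct work_struct work;
    };

    static void my_work_fn(struct work_struct *w)
    {
        /* completion processing runs here, in process context */
    }

    static void my_setup(struct my_cq_desc *desc)
    {
        /* Initialize exactly once, at resource-creation time. */
        INIT_WORK(&desc->work, my_work_fn);
    }

    static void my_completion_callback(struct my_cq_desc *desc)
    {
        /* Hot path: only queue. Calling INIT_WORK() here, as the old
         * code did, could corrupt a work item that is already queued. */
        queue_work(system_wq, &desc->work);
    }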
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
         schedule_delayed_work(&tgt->sess_del_work, 0);
     else
         schedule_delayed_work(&tgt->sess_del_work,
-            jiffies - sess->expires);
+            sess->expires - jiffies);
 }
 
 /* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
     struct scsi_qla_host *vha = tgt->vha;
     struct qla_hw_data *ha = vha->hw;
     struct qla_tgt_sess *sess;
-    unsigned long flags;
+    unsigned long flags, elapsed;
 
     spin_lock_irqsave(&ha->hardware_lock, flags);
     while (!list_empty(&tgt->del_sess_list)) {
         sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
             del_list_entry);
-        if (time_after_eq(jiffies, sess->expires)) {
+        elapsed = jiffies;
+        if (time_after_eq(elapsed, sess->expires)) {
             qlt_undelete_sess(sess);
             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
             ha->tgt.tgt_ops->put_sess(sess);
         } else {
             schedule_delayed_work(&tgt->sess_del_work,
-                jiffies - sess->expires);
+                sess->expires - elapsed);
             break;
         }
     }
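The sign of the delta matters because schedule_delayed_work() takes a relative timeout in jiffies and both operands are unsigned: while a session has not yet expired, jiffies - sess->expires wraps around to a huge value and the work is deferred almost indefinitely. A userspace sketch of the wraparound (illustrative tick values):

    #include <stdio.h>

    int main(void)
    {
        /* Pretend the session expires 500 ticks from now. */
        unsigned long jiffies = 100000;
        unsigned long expires = jiffies + 500;

        /* Buggy operand order: underflows to a near-ULONG_MAX delay. */
        unsigned long wrong = jiffies - expires;
        /* Fixed order: the intended relative timeout. */
        unsigned long right = expires - jiffies;

        printf("wrong delay: %lu ticks\n", wrong);  /* enormous */
        printf("right delay: %lu ticks\n", right);  /* 500 */
        return 0;
    }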
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
     if (rc != 0) {
         ha->tgt.tgt_ops = NULL;
         ha->tgt.target_lport_ptr = NULL;
+        scsi_host_put(host);
     }
     mutex_unlock(&qla_tgt_mutex);
     return rc;
...
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
          */
         send_sig(SIGINT, np->np_thread, 1);
         kthread_stop(np->np_thread);
+        np->np_thread = NULL;
     }
 
     np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
     if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
          (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
         /*
-         * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-         * that adds support for RESERVE/RELEASE.  There is a bug
-         * add with this new functionality that sets R/W bits when
-         * neither CDB carries any READ or WRITE datapayloads.
+         * From RFC-3720 Section 10.3.1:
+         *
+         * "Either or both of R and W MAY be 1 when either the
+         *  Expected Data Transfer Length and/or Bidirectional Read
+         *  Expected Data Transfer Length are 0"
+         *
+         * For this case, go ahead and clear the unnecessary bits
+         * to avoid any confusion with ->data_direction.
          */
-        if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-            hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-            hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-            goto done;
-        }
+        hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+        hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-        pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+        pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
             " set when Expected Data Transfer Length is 0 for"
-            " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-        return iscsit_add_reject_cmd(cmd,
-                ISCSI_REASON_BOOKMARK_INVALID, buf);
+            " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
     }
-done:
 
     if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
         !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
...
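The change above replaces the old RESERVE(6)/RELEASE(6) special case (CDBs 0x16/0x17) and the reject path with an unconditional fix-up: per the quoted RFC-3720 text, R/W bits with a zero Expected Data Transfer Length are legal, so the target clears them and warns instead of rejecting the command. A standalone sketch of the fix-up logic (simplified types; the flag values are illustrative of those in include/scsi/iscsi_proto.h):

    #include <stdio.h>

    #define ISCSI_FLAG_CMD_READ  0x40
    #define ISCSI_FLAG_CMD_WRITE 0x20

    /* Sketch of the fix-up: with a zero Expected Data Transfer Length,
     * the R/W bits carry no meaning, so clear them instead of rejecting. */
    static unsigned char fixup_flags(unsigned char flags, unsigned int data_length)
    {
        if ((flags & (ISCSI_FLAG_CMD_READ | ISCSI_FLAG_CMD_WRITE)) &&
            !data_length) {
            flags &= ~ISCSI_FLAG_CMD_READ;
            flags &= ~ISCSI_FLAG_CMD_WRITE;
        }
        return flags;
    }

    int main(void)
    {
        printf("0x%02x\n", fixup_flags(ISCSI_FLAG_CMD_READ, 0));   /* 0x00: cleared */
        printf("0x%02x\n", fixup_flags(ISCSI_FLAG_CMD_READ, 512)); /* 0x40: kept */
        return 0;
    }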
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name(            \
                                 \
     if (!capable(CAP_SYS_ADMIN))                    \
         return -EPERM;                        \
-                                \
+    if (count >= sizeof(auth->name))                \
+        return -EINVAL;                        \
     snprintf(auth->name, sizeof(auth->name), "%s", page);        \
     if (!strncmp("NULL", auth->name, 4))                \
         auth->naf_flags &= ~flags;                \
...
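The point of the configfs check above: snprintf() never writes more than sizeof(auth->name) bytes, so oversized input used to be silently truncated yet reported as fully stored; now it is rejected with -EINVAL before the copy. A userspace sketch of the same guard (auth_buf is a stand-in for the fixed-size auth field):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    static char auth_buf[8];    /* stands in for auth->name's fixed size */

    /* Sketch of the new store path: refuse input that cannot fit,
     * instead of letting snprintf() truncate it silently. */
    static ssize_t store_param(const char *page, size_t count)
    {
        if (count >= sizeof(auth_buf))
            return -EINVAL;
        snprintf(auth_buf, sizeof(auth_buf), "%s", page);
        return (ssize_t)count;
    }

    int main(void)
    {
        printf("%zd\n", store_param("short", 5));               /* 5 */
        printf("%zd\n", store_param("way-too-long-value", 18)); /* -22 (EINVAL) */
        return 0;
    }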
@@ -1403,11 +1403,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 out:
     stop = kthread_should_stop();
-    if (!stop && signal_pending(current)) {
-        spin_lock_bh(&np->np_thread_lock);
-        stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-        spin_unlock_bh(&np->np_thread_lock);
-    }
 
     /* Wait for another socket.. */
     if (!stop)
         return 1;
@@ -1415,7 +1410,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
     iscsi_stop_login_thread_timer(np);
     spin_lock_bh(&np->np_thread_lock);
     np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-    np->np_thread = NULL;
     spin_unlock_bh(&np->np_thread_lock);
     return 0;
...
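Together with the iscsit_del_np() hunk earlier, these two hunks shift responsibility for clearing np->np_thread: the login thread no longer NULLs the pointer on its own exit path, and teardown clears it only after kthread_stop() returns, so the pointer remains valid for the send_sig()/kthread_stop() sequence in iscsit_del_np().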
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
     dev->dev_attrib.block_size = block_size;
     pr_debug("dev[%p]: SE Device block_size changed to %u\n",
             dev, block_size);
+
+    if (dev->dev_attrib.max_bytes_per_io)
+        dev->dev_attrib.hw_max_sectors =
+            dev->dev_attrib.max_bytes_per_io / block_size;
+
     return 0;
 }
...
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
     pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
         " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
         TARGET_CORE_MOD_VERSION);
-    pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-        " MaxSectors: %u\n",
-        hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+    pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+        hba->hba_id, fd_host->fd_host_id);
 
     return 0;
 }
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
     }
 
     dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-    dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+    dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+    dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
     dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
     if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
...
@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH    32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE        512
-#define FD_MAX_SECTORS        2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES        8388608
 
 #define RRF_EMULATE_CDB        0x01
 #define RRF_GOT_LBA        0x02
...
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
     snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
     acl->se_tpg = tpg;
     acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-    spin_lock_init(&acl->stats_lock);
     acl->dynamic_node_acl = 1;
 
     tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
     snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
     acl->se_tpg = tpg;
     acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
-    spin_lock_init(&acl->stats_lock);
 
     tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
     spin_lock_init(&lun->lun_sep_lock);
     init_completion(&lun->lun_ref_comp);
 
-    ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
-    if (ret < 0)
-        return ret;
-
     ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
-    if (ret < 0) {
-        percpu_ref_cancel_init(&lun->lun_ref);
+    if (ret < 0)
         return ret;
-    }
 
     return 0;
 }
...
@@ -517,10 +517,6 @@ struct se_node_acl {
     u32            acl_index;
 #define MAX_ACL_TAG_SIZE 64
     char            acl_tag[MAX_ACL_TAG_SIZE];
-    u64            num_cmds;
-    u64            read_bytes;
-    u64            write_bytes;
-    spinlock_t        stats_lock;
     /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
     atomic_t        acl_pr_ref_count;
     struct se_dev_entry    **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
     u32        unmap_granularity;
     u32        unmap_granularity_alignment;
     u32        max_write_same_len;
+    u32        max_bytes_per_io;
     struct se_device *da_dev;
     struct config_group da_group;
 };
...