Commit 5bd665f2 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull target updates from Nicholas Bellinger:
 "It has been a very busy development cycle this time around in target
  land, with the highlights including:

   - Kill struct se_subsystem_dev, in favor of direct se_device usage
     (hch)
   - Simplify reservations code by combining SPC-3 + SCSI-2 support for
     virtual backends only (hch)
   - Simplify ALUA code for virtual only backends, and remove left over
     abstractions (hch)
   - Pass sense_reason_t as return value for I/O submission path (hch)
   - Refactor MODE_SENSE emulation to allow for easier addition of new
     mode pages.  (roland)
   - Add emulation of MODE_SELECT (roland)
   - Fix bug in handling of ExpStatSN wrap-around (steve)
   - Fix bug in TMR ABORT_TASK lookup in qla2xxx target (steve)
   - Add WRITE_SAME w/ UNMAP=0 support for IBLOCK backends (nab)
   - Convert ib_srpt to use modern target_submit_cmd caller + drop
     legacy ioctx->kref usage (nab)
   - Convert ib_srpt to use modern target_submit_tmr caller (nab)
   - Add link_magic for fabric allow_link destination target_items for
     symlinks within target_core_fabric_configfs.c code (nab)
   - Allocate pointers in instead of full structs for
     config_group->default_groups (sebastian)
   - Fix 32-bit highmem breakage for FILEIO (sebastian)

  All told, hch was able to shave off another ~1K LOC by killing the
  se_subsystem_dev abstraction, along with a number of PR + ALUA
  simplifications.  Also, a nice patch by Roland is the refactoring of
  MODE_SENSE handling, along with the addition of initial MODE_SELECT
  emulation support for virtual backends.

   Sebastian found a long-standing issue wrt allocation of full
   config_group structures instead of pointer arrays for
   config_group->default_group[] setup in a number of areas; fixing it
   ends up saving memory with big configurations.  He also managed to
   fix another long-standing BUG wrt broken 32-bit highmem support
   within the FILEIO backend driver.

  Thank you again to everyone who contributed this round!"
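The default_groups item called out above is purely an allocation-shape fix: configfs only ever stores an array of pointers in default_groups, so allocating an array of full config_group structs (as the old kzalloc calls in the diff below did) wasted memory for every group. A minimal user-space sketch of the two shapes, using a stand-in struct rather than the kernel's configfs API:

    #include <stdlib.h>

    /* Stand-in for the real (and larger) struct config_group. */
    struct config_group { char pad[256]; };

    int main(void)
    {
        /* Old shape: two full structs, even though only pointers get stored. */
        struct config_group *full = calloc(2, sizeof(struct config_group));

        /* New shape: one group pointer plus a NULL-terminator slot,
         * mirroring the "sizeof(struct config_group *) * 2" sizing below. */
        struct config_group **ptrs = malloc(2 * sizeof(struct config_group *));

        free(ptrs);
        free(full);
        return 0;
    }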

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
  target/iscsi_target: Add NodeACL tags for initiator group support
  target/tcm_fc: fix the lockdep warning due to inconsistent lock state
  sbp-target: fix error path in sbp_make_tpg()
  sbp-target: use simple assignment in tgt_agent_rw_agent_state()
  iscsi-target: use kstrdup() for iscsi_param
  target/file: merge fd_do_readv() and fd_do_writev()
  target/file: Fix 32-bit highmem breakage for SGL -> iovec mapping
  target: Add link_magic for fabric allow_link destination target_items
  ib_srpt: Convert TMR path to target_submit_tmr
  ib_srpt: Convert I/O path to target_submit_cmd + drop legacy ioctx->kref
  target: Make spc_get_write_same_sectors return sector_t
  target/configfs: use kmalloc() instead of kzalloc() for default groups
  target/configfs: allocate only 6 slots for dev_cg->default_groups
  target/configfs: allocate pointers instead of full struct for default_groups
  target: update error handling for sbc_setup_write_same()
  iscsit: use GFP_ATOMIC under spin lock
  iscsi_target: Remove redundant null check before kfree
  target/iblock: Forward declare bio helpers
  target: Clean up flow in transport_check_aborted_status()
  target: Clean up logic in transport_put_cmd()
  ...
parents 115b1cc2 79e62fc3
@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
         return ioctx;
 
     BUG_ON(ioctx->ch != ch);
-    kref_init(&ioctx->kref);
     spin_lock_init(&ioctx->spinlock);
     ioctx->state = SRPT_STATE_NEW;
     ioctx->n_rbuf = 0;
@@ -1290,39 +1289,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
     return ioctx;
 }
 
-/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
-    struct srpt_rdma_ch *ch;
-    unsigned long flags;
-
-    BUG_ON(!ioctx);
-    ch = ioctx->ch;
-    BUG_ON(!ch);
-
-    WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
-    srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-    transport_generic_free_cmd(&ioctx->cmd, 0);
-
-    if (ioctx->n_rbuf > 1) {
-        kfree(ioctx->rbufs);
-        ioctx->rbufs = NULL;
-        ioctx->n_rbuf = 0;
-    }
-
-    spin_lock_irqsave(&ch->spinlock, flags);
-    list_add(&ioctx->free_list, &ch->free_list);
-    spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
-    srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
 /**
  * srpt_abort_cmd() - Abort a SCSI command.
  * @ioctx: I/O context associated with the SCSI command.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
     }
     spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-    if (state == SRPT_STATE_DONE)
+    if (state == SRPT_STATE_DONE) {
+        struct srpt_rdma_ch *ch = ioctx->ch;
+
+        BUG_ON(ch->sess == NULL);
+
+        target_put_sess_cmd(ch->sess, &ioctx->cmd);
         goto out;
+    }
 
     pr_debug("Aborting cmd with state %d and tag %lld\n", state,
          ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
         spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
         ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
         spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-        kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+        target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
         break;
     case SRPT_STATE_MGMT_RSP_SENT:
         srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-        kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+        target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
         break;
     default:
         WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
             && state != SRPT_STATE_DONE))
         pr_debug("state = %d\n", state);
 
-    if (state != SRPT_STATE_DONE)
-        kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
-    else
+    if (state != SRPT_STATE_DONE) {
+        srpt_unmap_sg_to_ib_sge(ch, ioctx);
+        transport_generic_free_cmd(&ioctx->cmd, 0);
+    } else {
         printk(KERN_ERR "IB completion has been received too late for"
                " wr_id = %u.\n", ioctx->ioctx.index);
+    }
 }
 
 /**
@@ -1712,10 +1686,10 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
 static int srpt_check_stop_free(struct se_cmd *cmd)
 {
-    struct srpt_send_ioctx *ioctx;
+    struct srpt_send_ioctx *ioctx = container_of(cmd,
+                struct srpt_send_ioctx, cmd);
 
-    ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
-    return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+    return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 }
 
 /**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
     uint64_t unpacked_lun;
     u64 data_len;
     enum dma_data_direction dir;
-    int ret;
+    sense_reason_t ret;
+    int rc;
 
     BUG_ON(!send_ioctx);
 
     srp_cmd = recv_ioctx->ioctx.buf;
-    kref_get(&send_ioctx->kref);
     cmd = &send_ioctx->cmd;
     send_ioctx->tag = srp_cmd->tag;
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
         break;
     }
 
-    ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
-    if (ret) {
+    if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
         printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
                srp_cmd->tag);
-        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-        cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-        kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+        ret = TCM_INVALID_CDB_FIELD;
         goto send_sense;
     }
 
-    cmd->data_length = data_len;
-    cmd->data_direction = dir;
     unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
                        sizeof(srp_cmd->lun));
-    if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
-        kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+
+    rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+                &send_ioctx->sense_data[0], unpacked_lun, data_len,
+                MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+    if (rc != 0) {
+        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
         goto send_sense;
     }
-
-    ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
-    if (ret < 0) {
-        kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
-        if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
-            srpt_queue_status(cmd);
-            return 0;
-        } else
-            goto send_sense;
-    }
-
-    transport_handle_cdb_direct(cmd);
     return 0;
 
 send_sense:
-    transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
-                         0);
+    transport_send_check_condition_and_sense(cmd, ret, 0);
     return -1;
 }
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 {
     struct srp_tsk_mgmt *srp_tsk;
     struct se_cmd *cmd;
+    struct se_session *sess = ch->sess;
     uint64_t unpacked_lun;
+    uint32_t tag = 0;
     int tcm_tmr;
-    int res;
+    int rc;
 
     BUG_ON(!send_ioctx);
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
     send_ioctx->tag = srp_tsk->tag;
     tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
     if (tcm_tmr < 0) {
-        send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
         send_ioctx->cmd.se_tmr_req->response =
             TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
-        goto process_tmr;
-    }
-
-    res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-    if (res < 0) {
-        send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-        send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-        goto process_tmr;
+        goto fail;
     }
 
     unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
                        sizeof(srp_tsk->lun));
-    res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
-    if (res) {
-        pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
-        send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-        send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
-        goto process_tmr;
-    }
 
-    if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
-        srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
-    kref_get(&send_ioctx->kref);
-    if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
-        transport_generic_handle_tmr(&send_ioctx->cmd);
-    else
-        transport_send_check_condition_and_sense(cmd,
-                cmd->scsi_sense_reason, 0);
+    if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+        rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+        if (rc < 0) {
+            send_ioctx->cmd.se_tmr_req->response =
+                    TMR_TASK_DOES_NOT_EXIST;
+            goto fail;
+        }
+        tag = srp_tsk->task_tag;
+    }
+    rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+                srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+                TARGET_SCF_ACK_KREF);
+    if (rc != 0) {
+        send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+        goto fail;
+    }
+    return;
+fail:
+    transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
 }
 
 /**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
         }
     }
 
-    transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
-                  0, DMA_NONE, MSG_SIMPLE_TAG,
-                  send_ioctx->sense_data);
-
     switch (srp_cmd->opcode) {
     case SRP_CMD:
         srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 {
     struct srpt_rdma_ch *ch;
     struct srpt_device *sdev;
+    struct se_session *se_sess;
 
     ch = container_of(w, struct srpt_rdma_ch, release_work);
     pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
     sdev = ch->sport->sdev;
     BUG_ON(!sdev);
 
-    transport_deregister_session_configfs(ch->sess);
-    transport_deregister_session(ch->sess);
+    se_sess = ch->sess;
+    BUG_ON(!se_sess);
+
+    target_wait_for_sess_cmds(se_sess, 0);
+
+    transport_deregister_session_configfs(se_sess);
+    transport_deregister_session(se_sess);
     ch->sess = NULL;
 
     srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
             ioctx->tag);
         srpt_unmap_sg_to_ib_sge(ch, ioctx);
         srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-        kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+        target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
     }
 
 out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
 
 static void srpt_release_cmd(struct se_cmd *se_cmd)
 {
+    struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+                struct srpt_send_ioctx, cmd);
+    struct srpt_rdma_ch *ch = ioctx->ch;
+    unsigned long flags;
+
+    WARN_ON(ioctx->state != SRPT_STATE_DONE);
+    WARN_ON(ioctx->mapped_sg_count != 0);
+
+    if (ioctx->n_rbuf > 1) {
+        kfree(ioctx->rbufs);
+        ioctx->rbufs = NULL;
+        ioctx->n_rbuf = 0;
+    }
+
+    spin_lock_irqsave(&ch->spinlock, flags);
+    list_add(&ioctx->free_list, &ch->free_list);
+    spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
 /**
...
@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
 struct srpt_send_ioctx {
     struct srpt_ioctx ioctx;
     struct srpt_rdma_ch *ch;
-    struct kref kref;
     struct rdma_iu *rdma_ius;
     struct srp_direct_buf *rbufs;
     struct srp_direct_buf single_rbuf;
...
@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
     struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
 {
     struct qla_hw_data *ha = vha->hw;
+    struct se_session *se_sess = sess->se_sess;
     struct qla_tgt_mgmt_cmd *mcmd;
+    struct se_cmd *se_cmd;
+    u32 lun = 0;
     int rc;
+    bool found_lun = false;
+
+    spin_lock(&se_sess->sess_cmd_lock);
+    list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+        struct qla_tgt_cmd *cmd =
+            container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+        if (cmd->tag == abts->exchange_addr_to_abort) {
+            lun = cmd->unpacked_lun;
+            found_lun = true;
+            break;
+        }
+    }
+    spin_unlock(&se_sess->sess_cmd_lock);
+
+    if (!found_lun)
+        return -ENOENT;
 
     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
         "qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
     mcmd->sess = sess;
     memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 
-    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
         abts->exchange_addr_to_abort);
     if (rc != 0) {
         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
...
@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
         return;
     }
 
-    cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
-    transport_generic_request_failure(&cmd->se_cmd);
+    transport_generic_request_failure(&cmd->se_cmd,
+                TCM_CHECK_CONDITION_ABORT_CMD);
     return;
 }
...
@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
     list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
         spin_lock(&cmd->istate_lock);
         if ((cmd->i_state == ISTATE_SENT_STATUS) &&
-            (cmd->stat_sn < exp_statsn)) {
+            iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
             cmd->i_state = ISTATE_REMOVE;
             spin_unlock(&cmd->istate_lock);
             iscsit_add_cmd_to_immediate_queue(cmd, conn,
@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
     struct iscsi_conn *conn,
     unsigned char *buf)
 {
-    int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
-    int dump_immediate_data = 0, send_check_condition = 0, payload_length;
-    struct iscsi_cmd *cmd = NULL;
+    int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
+    struct iscsi_cmd *cmd = NULL;
     struct iscsi_scsi_req *hdr;
     int iscsi_task_attr;
     int sam_task_attr;
@@ -956,38 +955,26 @@ static int iscsit_handle_scsi_cmd(
         " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
         hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
 
-    /*
-     * The CDB is going to an se_device_t.
-     */
-    ret = transport_lookup_cmd_lun(&cmd->se_cmd,
-                scsilun_to_int(&hdr->lun));
-    if (ret < 0) {
-        if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
-            pr_debug("Responding to non-acl'ed,"
-                " non-existent or non-exported iSCSI LUN:"
-                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+    cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+                scsilun_to_int(&hdr->lun));
+    if (cmd->sense_reason)
+        goto attach_cmd;
+
+    cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+    if (cmd->sense_reason) {
+        if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+            return iscsit_add_reject_from_cmd(
+                    ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+                    1, 1, buf, cmd);
         }
-        send_check_condition = 1;
+
         goto attach_cmd;
     }
 
-    transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
-    if (transport_ret == -ENOMEM) {
+    if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
         return iscsit_add_reject_from_cmd(
                 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                 1, 1, buf, cmd);
-    } else if (transport_ret < 0) {
-        /*
-         * Unsupported SAM Opcode. CHECK_CONDITION will be sent
-         * in iscsit_execute_cmd() during the CmdSN OOO Execution
-         * Mechinism.
-         */
-        send_check_condition = 1;
-    } else {
-        if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
-            return iscsit_add_reject_from_cmd(
-                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                1, 1, buf, cmd);
     }
 
 attach_cmd:
@@ -1000,11 +987,12 @@ static int iscsit_handle_scsi_cmd(
      */
     core_alua_check_nonop_delay(&cmd->se_cmd);
 
-    ret = iscsit_allocate_iovecs(cmd);
-    if (ret < 0)
+    if (iscsit_allocate_iovecs(cmd) < 0) {
         return iscsit_add_reject_from_cmd(
                 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                 1, 0, buf, cmd);
+    }
+
     /*
      * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
      * the Immediate Bit is not set, and no Immediate
@@ -1031,10 +1019,7 @@ static int iscsit_handle_scsi_cmd(
      * If no Immediate Data is attached, it's OK to return now.
      */
     if (!cmd->immediate_data) {
-        if (send_check_condition)
-            return 0;
-
-        if (cmd->unsolicited_data) {
+        if (!cmd->sense_reason && cmd->unsolicited_data) {
             iscsit_set_dataout_sequence_values(cmd);
 
             spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1050,19 +1035,17 @@ static int iscsit_handle_scsi_cmd(
      * thread. They are processed in CmdSN order by
      * iscsit_check_received_cmdsn() below.
      */
-    if (send_check_condition) {
+    if (cmd->sense_reason) {
         immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-        dump_immediate_data = 1;
         goto after_immediate_data;
     }
     /*
     * Call directly into transport_generic_new_cmd() to perform
     * the backend memory allocation.
     */
-    ret = transport_generic_new_cmd(&cmd->se_cmd);
-    if (ret < 0) {
+    cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+    if (cmd->sense_reason) {
         immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-        dump_immediate_data = 1;
         goto after_immediate_data;
     }
@@ -1079,7 +1062,7 @@ static int iscsit_handle_scsi_cmd(
         * Special case for Unsupported SAM WRITE Opcodes
         * and ImmediateData=Yes.
         */
-        if (dump_immediate_data) {
+        if (cmd->sense_reason) {
             if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
                 return -1;
         } else if (cmd->unsolicited_data) {
@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
     spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
     spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-    if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-        (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+    if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
         dump_unsolicited_data = 1;
     spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
         ret = transport_lookup_tmr_lun(&cmd->se_cmd,
                     scsilun_to_int(&hdr->lun));
         if (ret < 0) {
-            cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
             se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
             goto attach;
         }
@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
     switch (function) {
     case ISCSI_TM_FUNC_ABORT_TASK:
         se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
-        if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
-            cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+        if (se_tmr->response)
             goto attach;
-        }
         break;
     case ISCSI_TM_FUNC_ABORT_TASK_SET:
     case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
         break;
     case ISCSI_TM_FUNC_TARGET_WARM_RESET:
         if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
-            cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
             se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
             goto attach;
         }
         break;
     case ISCSI_TM_FUNC_TARGET_COLD_RESET:
         if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
-            cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
             se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
             goto attach;
         }
@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
         * Perform sanity checks on the ExpDataSN only if the
         * TASK_REASSIGN was successful.
         */
-        if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+        if (se_tmr->response)
             break;
 
         if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
     default:
         pr_err("Unknown TMR function: 0x%02x, protocol"
             " error.\n", function);
-        cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
         se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
         goto attach;
     }
@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
     if (!conn_p)
         return;
 
-    cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+    cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
     if (!cmd) {
         iscsit_dec_conn_usage_count(conn_p);
         return;
...
@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
 
 TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
 
+static ssize_t lio_target_nacl_show_tag(
+    struct se_node_acl *se_nacl,
+    char *page)
+{
+    return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+    struct se_node_acl *se_nacl,
+    const char *page,
+    size_t count)
+{
+    int ret;
+
+    ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+    if (ret < 0)
+        return ret;
+    return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
 static struct configfs_attribute *lio_target_initiator_attrs[] = {
     &lio_target_nacl_info.attr,
     &lio_target_nacl_cmdsn_depth.attr,
+    &lio_target_nacl_tag.attr,
     NULL,
 };
 
@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
     acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
     stats_cg = &se_nacl->acl_fabric_stat_group;
 
-    stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+    stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                 GFP_KERNEL);
     if (!stats_cg->default_groups) {
         pr_err("Unable to allocate memory for"
@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
     */
     stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
 
-    stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+    stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
                 GFP_KERNEL);
     if (!stats_cg->default_groups) {
         pr_err("Unable to allocate memory for"
...
@@ -474,7 +474,7 @@ struct iscsi_cmd {
     struct scatterlist *first_data_sg;
     u32 first_data_sg_off;
     u32 kmapped_nents;
+    sense_reason_t sense_reason;
 } ____cacheline_aligned;
 
 struct iscsi_tmr_req {
...
@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
     case ISCSI_OP_SCSI_CMD:
         /*
         * Go ahead and send the CHECK_CONDITION status for
-        * any SCSI CDB exceptions that may have occurred, also
-        * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+        * any SCSI CDB exceptions that may have occurred.
         */
-        if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-            if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+        if (cmd->sense_reason) {
+            if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
                 cmd->i_state = ISTATE_SEND_STATUS;
                 spin_unlock_bh(&cmd->istate_lock);
                 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
             * exception
             */
             return transport_send_check_condition_and_sense(se_cmd,
-                    se_cmd->scsi_sense_reason, 0);
+                    cmd->sense_reason, 0);
         }
         /*
         * Special case for delayed CmdSN with Immediate
@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
         iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
         break;
     case ISCSI_OP_SCSI_TMFUNC:
-        if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+        if (cmd->se_cmd.se_tmr_req->response) {
             spin_unlock_bh(&cmd->istate_lock);
             iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
                     cmd->i_state);
...
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
         * made generic here.
         */
         if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
-            (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+            iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
             list_del(&cmd->i_conn_node);
             spin_unlock_bh(&conn->cmd_lock);
             iscsit_free_cmd(cmd);
...
@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 
     initiatorname_param = iscsi_find_param_from_key(
             INITIATORNAME, conn->param_list);
-    if (!initiatorname_param)
-        return -1;
-
     sessiontype_param = iscsi_find_param_from_key(
             SESSIONTYPE, conn->param_list);
-    if (!sessiontype_param)
+    if (!initiatorname_param || !sessiontype_param) {
+        iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+            ISCSI_LOGIN_STATUS_MISSING_FIELDS);
         return -1;
+    }
 
     sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
         kfree(sess);
         return -ENOMEM;
     }
-    spin_lock(&sess_idr_lock);
+    spin_lock_bh(&sess_idr_lock);
     ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
-    spin_unlock(&sess_idr_lock);
+    spin_unlock_bh(&sess_idr_lock);
 
     if (ret < 0) {
         pr_err("idr_get_new() for sess_idr failed\n");
@@ -1118,10 +1118,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
             idr_remove(&sess_idr, conn->sess->session_index);
             spin_unlock_bh(&sess_idr_lock);
         }
-        if (conn->sess->sess_ops)
-            kfree(conn->sess->sess_ops);
-        if (conn->sess)
-            kfree(conn->sess);
+        kfree(conn->sess->sess_ops);
+        kfree(conn->sess);
 old_sess_out:
     iscsi_stop_login_thread_timer(np);
     /*
...
@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
             login->req_buf,
             payload_length,
             conn);
-    if (ret < 0)
+    if (ret < 0) {
+        iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+                ISCSI_LOGIN_STATUS_INIT_ERR);
         return -1;
+    }
 
     if (login->first_request)
         if (iscsi_target_check_first_request(conn, login) < 0)
@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
             login->rsp_buf,
             &login->rsp_length,
             conn->param_list);
-    if (ret < 0)
+    if (ret < 0) {
+        iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+                ISCSI_LOGIN_STATUS_INIT_ERR);
         return -1;
+    }
 
     if (!login->auth_complete &&
          ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
...
@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
     }
     INIT_LIST_HEAD(&param->p_list);
 
-    param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+    param->name = kstrdup(name, GFP_KERNEL);
     if (!param->name) {
         pr_err("Unable to allocate memory for parameter name.\n");
         goto out;
     }
 
-    param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+    param->value = kstrdup(value, GFP_KERNEL);
     if (!param->value) {
         pr_err("Unable to allocate memory for parameter value.\n");
         goto out;
     }
 
-    memcpy(param->name, name, strlen(name));
-    param->name[strlen(name)] = '\0';
-    memcpy(param->value, value, strlen(value));
-    param->value[strlen(value)] = '\0';
-
     param->phase = phase;
     param->scope = scope;
     param->sender = sender;
@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
         list_del(&param->p_list);
 
         kfree(param->name);
-        param->name = NULL;
         kfree(param->value);
-        param->value = NULL;
         kfree(param);
-        param = NULL;
     }
 
     iscsi_release_extra_responses(param_list);
@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
 {
     kfree(param->value);
 
-    param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+    param->value = kstrdup(value, GFP_KERNEL);
     if (!param->value) {
         pr_err("Unable to allocate memory for value.\n");
         return -ENOMEM;
     }
 
-    memcpy(param->value, value, strlen(value));
-    param->value[strlen(value)] = '\0';
-
     pr_debug("iSCSI Parameter updated to %s=%s\n",
             param->name, param->value);
     return 0;
...
@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
     if (!ref_cmd) {
         pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
             " %hu.\n", hdr->rtt, conn->cid);
-        return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
-            be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
+        return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+            iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
             ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
     }
 
     if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
...
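The RefCmdSN check just above, like the ExpStatSN wrap-around fix earlier in this series, swaps plain <, <= and >= comparisons on 32-bit sequence numbers for serial-number comparisons (iscsi_sna_lt(), iscsi_sna_gte() and friends), which stay correct when the counter wraps past 0xffffffff. A standalone sketch of the usual trick; this is my own illustration of the idea, not the kernel's helper implementation:

    #include <assert.h>
    #include <stdint.h>

    /* Serial-number "a < b": true when b is ahead of a, even across a
     * 32-bit wrap, assuming the two values are less than 2^31 apart. */
    static int sna_lt(uint32_t a, uint32_t b)
    {
        return a != b && (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        assert(sna_lt(5, 10));            /* ordinary case */
        assert(sna_lt(0xfffffffeu, 3));   /* across the wrap */
        assert(!sna_lt(3, 0xfffffffeu));  /* not the reverse */
        return 0;
    }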
@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
         return NULL;
     }
 
-    list_for_each_entry(ts, &inactive_ts_list, ts_list)
-        break;
+    ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
     list_del(&ts->ts_list);
     iscsit_global->inactive_ts--;
...
@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
         spin_unlock_bh(&conn->immed_queue_lock);
         return NULL;
     }
-    list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
-        break;
+    qr = list_first_entry(&conn->immed_queue_list,
+            struct iscsi_queue_req, qr_list);
 
     list_del(&qr->qr_list);
     if (qr->cmd)
@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
         return NULL;
     }
 
-    list_for_each_entry(qr, &conn->response_queue_list, qr_list)
-        break;
+    qr = list_first_entry(&conn->response_queue_list,
+            struct iscsi_queue_req, qr_list);
 
     list_del(&qr->qr_list);
     if (qr->cmd)
...
@@ -53,7 +53,6 @@ struct tcm_loop_hba {
     struct se_hba_s *se_hba;
     struct se_lun *tl_hba_lun;
     struct se_port *tl_hba_lun_sep;
-    struct se_device_s *se_dev_hba_ptr;
     struct tcm_loop_nexus *tl_nexus;
     struct device dev;
     struct Scsi_Host *sh;
...
 config SBP_TARGET
     tristate "FireWire SBP-2 fabric module"
-    depends on FIREWIRE && EXPERIMENTAL
+    depends on FIREWIRE
     help
       Say Y or M here to enable SCSI target functionality over FireWire.
       This enables you to expose SCSI devices to other nodes on the FireWire
...
@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
         struct sbp_target_agent *agent)
 {
-    __be32 state;
+    int state;
 
     switch (tcode) {
     case TCODE_READ_QUADLET_REQUEST:
         pr_debug("tgt_agent AGENT_STATE READ\n");
 
         spin_lock_bh(&agent->lock);
-        state = cpu_to_be32(agent->state);
+        state = agent->state;
         spin_unlock_bh(&agent->lock);
-        memcpy(data, &state, sizeof(state));
+
+        *(__be32 *)data = cpu_to_be32(state);
 
         return RCODE_COMPLETE;
@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
     tport->mgt_agt = sbp_management_agent_register(tport);
     if (IS_ERR(tport->mgt_agt)) {
         ret = PTR_ERR(tport->mgt_agt);
-        kfree(tpg);
-        return ERR_PTR(ret);
+        goto out_free_tpg;
     }
 
     ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
             &tpg->se_tpg, (void *)tpg,
             TRANSPORT_TPG_TYPE_NORMAL);
-    if (ret < 0) {
-        sbp_management_agent_unregister(tport->mgt_agt);
-        kfree(tpg);
-        return ERR_PTR(ret);
-    }
+    if (ret < 0)
+        goto out_unreg_mgt_agt;
 
     return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+    sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+    tport->tpg = NULL;
+    kfree(tpg);
+    return ERR_PTR(ret);
 }
 
 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
...
@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-extern int target_emulate_report_target_port_groups(struct se_cmd *);
-extern int target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
                 struct se_device *, struct se_port *,
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
                 struct t10_alua_lu_gp *);
 extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-            struct se_subsystem_dev *, const char *, int);
+            struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
                 struct se_port *);
@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
                 char *);
 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
                 const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
 
 #endif /* TARGET_CORE_ALUA_H */
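The prototype changes in this header are part of hch's conversion of the submission and emulation paths to hand back a sense_reason_t instead of a plain int plus the SCF_SCSI_CDB_EXCEPTION / scsi_sense_reason side channels, as described in the pull message. A compact user-space sketch of that calling convention, with hypothetical codes rather than the kernel's sense_reason_t enum:

    #include <stdio.h>

    /* Hypothetical sense codes; the kernel's enum differs. */
    typedef enum {
        MY_NO_SENSE = 0,
        MY_INVALID_CDB_FIELD,
    } my_sense_reason_t;

    /* The callee reports a CHECK CONDITION reason directly through its
     * return value; zero means "nothing to report". */
    static my_sense_reason_t parse_cdb(const unsigned char *cdb)
    {
        if (cdb[0] == 0xff)
            return MY_INVALID_CDB_FIELD;
        return MY_NO_SENSE;
    }

    int main(void)
    {
        unsigned char bad_cdb[16] = { 0xff };
        my_sense_reason_t ret = parse_cdb(bad_cdb);

        if (ret)
            printf("would send CHECK CONDITION, reason %d\n", ret);
        return 0;
    }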
@@ -4,10 +4,9 @@
  * This file contains generic fabric module configfs infrastructure for
  * TCM v4.x code
  *
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
     struct se_portal_group *se_tpg;
     struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
     int ret = 0, lun_access;
+
+    if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+        pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+            " %p to struct lun: %p\n", lun_ci, lun);
+        return -EFAULT;
+    }
     /*
     * Ensure that the source port exists
     */
@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
     }
 
     lacl_cg = &lacl->se_lun_group;
-    lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+    lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                 GFP_KERNEL);
     if (!lacl_cg->default_groups) {
         pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
     lacl_cg->default_groups[1] = NULL;
 
     ml_stat_grp = &lacl->ml_stat_grps.stat_group;
-    ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+    ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
                 GFP_KERNEL);
     if (!ml_stat_grp->default_groups) {
         pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -734,17 +739,21 @@ static int target_fabric_port_link(
     struct config_item *se_dev_ci)
 {
     struct config_item *tpg_ci;
-    struct se_device *dev;
     struct se_lun *lun = container_of(to_config_group(lun_ci),
                 struct se_lun, lun_group);
     struct se_lun *lun_p;
     struct se_portal_group *se_tpg;
-    struct se_subsystem_dev *se_dev = container_of(
-        to_config_group(se_dev_ci), struct se_subsystem_dev,
-        se_dev_group);
+    struct se_device *dev =
+        container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
     struct target_fabric_configfs *tf;
     int ret;
 
+    if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+        pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+            " %p to struct se_device: %p\n", se_dev_ci, dev);
+        return -EFAULT;
+    }
+
     tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
     se_tpg = container_of(to_config_group(tpg_ci),
                 struct se_portal_group, tpg_group);
@@ -755,14 +764,6 @@ static int target_fabric_port_link(
         return -EEXIST;
     }
 
-    dev = se_dev->se_dev_ptr;
-    if (!dev) {
-        pr_err("Unable to locate struct se_device pointer from"
-            " %s\n", config_item_name(se_dev_ci));
-        ret = -ENODEV;
-        goto out;
-    }
-
     lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
     if (IS_ERR(lun_p)) {
         pr_err("core_dev_add_lun() failed\n");
@@ -869,7 +870,7 @@ static struct config_group *target_fabric_make_lun(
         return ERR_PTR(-EINVAL);
 
     lun_cg = &lun->lun_group;
-    lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+    lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                 GFP_KERNEL);
     if (!lun_cg->default_groups) {
         pr_err("Unable to allocate lun_cg->default_groups\n");
...
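The lun_link_magic / dev_link_magic checks added in the hunks above guard the container_of() casts in the configfs allow_link callbacks: a symlink made against the wrong config_item would otherwise be silently reinterpreted as a struct se_lun or struct se_device. A user-space sketch of the same validate-before-container_of idea, with hypothetical names and magic value rather than the target core's:

    #include <stddef.h>
    #include <stdio.h>

    #define MY_LUN_LINK_MAGIC 0x7771377Cu   /* arbitrary value for this sketch */

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct my_item { int unused; };

    struct my_lun {
        unsigned int lun_link_magic;        /* set when the lun is set up */
        struct my_item item;
    };

    struct my_other {
        unsigned int some_field;            /* occupies the magic's slot */
        struct my_item item;
    };

    static int link_to_lun(struct my_item *ci)
    {
        struct my_lun *lun = container_of(ci, struct my_lun, item);

        /* Reject items that are not actually embedded in a my_lun. */
        if (lun->lun_link_magic != MY_LUN_LINK_MAGIC) {
            fprintf(stderr, "not a valid lun item: %p\n", (void *)ci);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct my_lun lun = { .lun_link_magic = MY_LUN_LINK_MAGIC };
        struct my_other other = { .some_field = 0 };

        if (link_to_lun(&lun.item) != 0)    /* should be accepted */
            return 1;
        if (link_to_lun(&other.item) == 0)  /* should be rejected */
            return 1;
        return 0;
    }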
@@ -4,8 +4,7 @@
  * This file contains generic high level protocol identifier and PR
  * handlers for TCM fabric modules
  *
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
...
@@ -17,6 +17,8 @@
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04
 
 struct fd_dev {
+    struct se_device dev;
+
     u32 fbd_flags;
     unsigned char fd_dev_name[FD_MAX_DEV_NAME];
     /* Unique Ramdisk Device ID in Ramdisk HBA */
...
@@ -3,10 +3,7 @@
  *
  * This file contains the TCM HBA Transport related functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
         return ERR_PTR(-ENOMEM);
     }
 
-    INIT_LIST_HEAD(&hba->hba_dev_list);
     spin_lock_init(&hba->device_lock);
     mutex_init(&hba->hba_access_mutex);
 
@@ -152,8 +148,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
 int
 core_delete_hba(struct se_hba *hba)
 {
-    if (!list_empty(&hba->hba_dev_list))
-        dump_stack();
+    WARN_ON(hba->dev_count);
 
     hba->transport->detach_hba(hba);
...
@@ -14,6 +14,7 @@ struct iblock_req {
 #define IBDF_HAS_UDEV_PATH 0x01
 
 struct iblock_dev {
+    struct se_device dev;
     unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
     u32 ibd_flags;
     struct bio_set *ibd_bio_set;
...
@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
         struct se_lun *);
 void core_dev_unexport(struct se_device *, struct se_portal_group *,
         struct se_lun *);
-int target_report_luns(struct se_cmd *);
-void se_release_device_for_hba(struct se_device *);
-void se_release_vpd_for_dev(struct se_device *);
-int se_free_virtual_device(struct se_device *, struct se_hba *);
-int se_dev_check_online(struct se_device *);
-int se_dev_check_shutdown(struct se_device *);
-void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
 int se_dev_set_task_timeout(struct se_device *, u32);
 int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
 int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
 int se_dev_set_unmap_granularity(struct se_device *, u32);
 int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_max_write_same_len(struct se_device *, u32);
 int se_dev_set_emulate_dpo(struct se_device *, int);
 int se_dev_set_emulate_fua_write(struct se_device *, int);
 int se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
         struct se_lun_acl *lacl);
 int core_dev_setup_virtual_lun0(void);
 void core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int target_configure_device(struct se_device *dev);
+void target_free_device(struct se_device *);
 
 /* target_core_hba.c */
 struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -105,10 +102,11 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int transport_clear_lun_from_sessions(struct se_lun *);
 void transport_send_task_abort(struct se_cmd *);
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void target_qf_do_work(struct work_struct *work);
 
 /* target_core_stat.c */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_dev_default_groups(struct se_device *);
 void target_stat_setup_port_default_groups(struct se_lun *);
 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
...
This diff is collapsed.
@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
 extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
 		char *, u32);
-extern int target_scsi2_reservation_release(struct se_cmd *);
-extern int target_scsi2_reservation_reserve(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
 		struct t10_reservation *, u64,
 		unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int target_scsi3_emulate_pr_in(struct se_cmd *);
-extern int target_scsi3_emulate_pr_out(struct se_cmd *);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
 #endif /* TARGET_CORE_PR_H */
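The reservations entry points now hand back a sense_reason_t instead of an errno, so callers can propagate SCSI sense state directly. A minimal, illustrative sketch of the calling convention (not code from this series):

/*
 * Illustrative only: a zero return (TCM_NO_SENSE) means keep going; any other
 * sense_reason_t value is translated by the core into the matching SCSI
 * status and sense data (e.g. RESERVATION CONFLICT, CHECK CONDITION).
 */
static sense_reason_t example_check_before_execute(struct se_cmd *cmd)
{
	sense_reason_t ret;

	ret = target_check_reservation(cmd);
	if (ret)
		return ret;

	return target_scsi3_ua_check(cmd);
}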
This diff is collapsed.
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
 #define PDF_HAS_VIRT_HOST_ID 0x20
 struct pscsi_dev_virt {
+	struct se_device dev;
 	int pdv_flags;
 	int pdv_host_id;
 	int pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
 	int pdv_lun_id;
 	struct block_device *pdv_bd;
 	struct scsi_device *pdv_sd;
-	struct se_hba *pdv_se_hba;
 } ____cacheline_aligned;
 typedef enum phv_modes {
...
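Embedding struct se_device directly in the backend-private structure (iblock_dev above, pscsi_dev_virt here, rd_dev below) is what lets the pdv_se_hba back-pointer and the old dev_ptr indirection go away: the backend recovers its private data with container_of(). A sketch of the accessor pattern, mirroring the RD_DEV() helper added to the ramdisk backend below; the pscsi.c side of this change is in one of the collapsed diffs, so the helper here is shown as an assumption.

/* Hedged sketch of the container_of() accessor used by the converted backends. */
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
	return container_of(dev, struct pscsi_dev_virt, dev);
}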
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine <-> Ramdisk transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -41,7 +38,10 @@
 #include "target_core_rd.h"
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct rd_dev, dev);
+}
 /* rd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
 	return 0;
 }
-static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct rd_dev *rd_dev;
 	struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
 	rd_dev->rd_host = rd_host;
-	return rd_dev;
+	return &rd_dev->dev;
 }
-static struct se_device *rd_create_virtdevice(struct se_hba *hba,
-		struct se_subsystem_dev *se_dev, void *p)
+static int rd_configure_device(struct se_device *dev)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct rd_dev *rd_dev = p;
-	struct rd_host *rd_host = hba->hba_ptr;
-	int dev_flags = 0, ret;
-	char prod[16], rev[4];
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	struct rd_dev *rd_dev = RD_DEV(dev);
+	struct rd_host *rd_host = dev->se_hba->hba_ptr;
+	int ret;
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		pr_debug("Missing rd_pages= parameter\n");
+		return -EINVAL;
+	}
 	ret = rd_build_device_space(rd_dev);
 	if (ret < 0)
 		goto fail;
-	snprintf(prod, 16, "RAMDISK-MCP");
-	snprintf(rev, 4, "%s", RD_MCP_VERSION);
-	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
-	dev_limits.limits.max_hw_sectors = UINT_MAX;
-	dev_limits.limits.max_sectors = UINT_MAX;
-	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-	dev = transport_add_device_to_core_hba(hba,
-			&rd_mcp_template, se_dev, dev_flags, rd_dev,
-			&dev_limits, prod, rev);
-	if (!dev)
-		goto fail;
+	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
 			rd_dev->sg_table_count,
 			(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
-	return dev;
+	return 0;
 fail:
 	rd_release_device_space(rd_dev);
-	return ERR_PTR(ret);
+	return ret;
 }
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
 {
-	struct rd_dev *rd_dev = p;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	rd_release_device_space(rd_dev);
 	kfree(rd_dev);
@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 	return NULL;
 }
-static int rd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
 	enum dma_data_direction data_direction = cmd->data_direction;
 	struct se_device *se_dev = cmd->se_dev;
-	struct rd_dev *dev = se_dev->dev_ptr;
+	struct rd_dev *dev = RD_DEV(se_dev);
 	struct rd_dev_sg_table *table;
 	struct scatterlist *rd_sg;
 	struct sg_mapping_iter m;
@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
 	u32 src_len;
 	u64 tmp;
-	tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
 	rd_offset = do_div(tmp, PAGE_SIZE);
 	rd_page = tmp;
 	rd_size = cmd->data_length;
 	table = rd_get_sg_table(dev, rd_page);
 	if (!table)
-		return -EINVAL;
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
 		table = rd_get_sg_table(dev, rd_page);
 		if (!table) {
 			sg_miter_stop(&m);
-			return -EINVAL;
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
 		/* since we increment, the first sg entry is correct */
@@ -378,13 +367,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };
-static ssize_t rd_set_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page,
-	ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	char *orig, *ptr, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
 	return (!ret) ? count : ret;
 }
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
-	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
-		pr_debug("Missing rd_pages= parameter\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-static ssize_t rd_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
 			rd_dev->rd_dev_id);
 	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
 	return bl;
 }
-static u32 rd_get_device_rev(struct se_device *dev)
-{
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-static u32 rd_get_device_type(struct se_device *dev)
-{
-	return TYPE_DISK;
-}
 static sector_t rd_get_blocks(struct se_device *dev)
 {
-	struct rd_dev *rd_dev = dev->dev_ptr;
+	struct rd_dev *rd_dev = RD_DEV(dev);
 	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
-			dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+			dev->dev_attrib.block_size) - 1;
 	return blocks_long;
 }
-static struct spc_ops rd_spc_ops = {
+static struct sbc_ops rd_sbc_ops = {
 	.execute_rw = rd_execute_rw,
 };
-static int rd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &rd_spc_ops);
+	return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 static struct se_subsystem_api rd_mcp_template = {
 	.name = "rd_mcp",
+	.inquiry_prod = "RAMDISK-MCP",
+	.inquiry_rev = RD_MCP_VERSION,
 	.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
 	.attach_hba = rd_attach_hba,
 	.detach_hba = rd_detach_hba,
-	.allocate_virtdevice = rd_allocate_virtdevice,
-	.create_virtdevice = rd_create_virtdevice,
+	.alloc_device = rd_alloc_device,
+	.configure_device = rd_configure_device,
 	.free_device = rd_free_device,
 	.parse_cdb = rd_parse_cdb,
-	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
 	.show_configfs_dev_params = rd_show_configfs_dev_params,
-	.get_device_rev = rd_get_device_rev,
-	.get_device_type = rd_get_device_type,
+	.get_device_type = sbc_get_device_type,
 	.get_blocks = rd_get_blocks,
 };
...
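The allocate_virtdevice/create_virtdevice pair becomes alloc_device/configure_device, and the old check_configfs_dev_params step folds into configure time (the rd_pages= check now lives in rd_configure_device()). A simplified sketch of how the core presumably drives the new callbacks; this is an assumed flow with error handling trimmed, not the actual target_alloc_device()/target_configure_device() code, which sits in the collapsed target_core_device.c diff.

/* Hedged sketch, not the actual core code: lifecycle of a backend device
 * under the new se_subsystem_api callbacks. */
static struct se_device *example_setup_device(struct se_hba *hba,
		const char *name, const char *params)
{
	struct se_device *dev;
	int ret;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* configfs attribute writes land here before the device is enabled */
	hba->transport->set_configfs_dev_params(dev, params, strlen(params));

	/* writing 1 to the configfs "enable" attribute triggers configuration */
	ret = hba->transport->configure_device(dev);
	if (ret) {
		hba->transport->free_device(dev);
		return ERR_PTR(ret);
	}
	return dev;
}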
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
 #define RDF_HAS_PAGE_COUNT 0x01
 struct rd_dev {
+	struct se_device dev;
 	u32 rd_flags;
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
 	u32 rd_dev_id;
...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -3,8 +3,7 @@
  *
  * This file contains SPC-3 task management infrastructure
  *
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
 	 * which the command was received shall be completed with TASK ABORTED
 	 * status (see SAM-4).
 	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	tas = dev->dev_attrib.emulate_tas;
 	/*
 	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
 	 * LOGICAL UNIT RESET
 	 */
 	if (!preempt_and_abort_list &&
-	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+	     (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
 		spin_lock(&dev->dev_reservation_lock);
 		dev->dev_reserved_node_acl = NULL;
-		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
 		spin_unlock(&dev->dev_reservation_lock);
 		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
 	}
...
@@ -3,10 +3,7 @@
  *
  * This file contains generic Target Portal Group related functions.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
 }
 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+/* core_tpg_set_initiator_node_tag():
+ *
+ * Initiator nodeacl tags are not used internally, but may be used by
+ * userspace to emulate aliases or groups.
+ * Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	const char *new_tag)
+{
+	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+		return -EINVAL;
+	if (!strncmp("NULL", new_tag, 4)) {
+		acl->acl_tag[0] = '\0';
+		return 0;
+	}
+	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
 {
 	/* Set in core_dev_setup_virtual_lun0() */
@@ -672,6 +692,7 @@ int core_tpg_register(
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 		lun = se_tpg->tpg_lun_list[i];
 		lun->unpacked_lun = i;
+		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
 		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 		atomic_set(&lun->lun_acl_count, 0);
 		init_completion(&lun->lun_shutdown_comp);
...
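core_tpg_set_initiator_node_tag() above gives fabric drivers a common way to attach a userspace-visible tag to a NodeACL (writing "NULL" clears it). A hypothetical configfs ->store handler showing how a fabric module might wire it up; the handler name and surrounding plumbing are invented for illustration.

/* Illustration only; the handler name is hypothetical. */
static ssize_t example_nacl_tag_store(struct se_node_acl *se_nacl,
		const char *page, size_t count)
{
	/* a real handler would also strip the trailing newline from page */
	int ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);

	if (ret < 0)
		return ret;
	return count;
}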
This diff is collapsed.
This diff is collapsed.
@@ -26,7 +26,7 @@
 extern struct kmem_cache *se_ua_cache;
-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
 extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
...
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.