Commit 393bcfae authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "Here are the target-pending fixes for v4.12-rc4:

   - ibmvscsis ABORT_TASK handling fixes that missed the v4.12 merge
     window. (Bryant Ly and Michael Cyr)

   - Re-add a target-core check enforcing WRITE overflow reject that was
     relaxed in v4.3, to avoid unsupported iscsi-target immediate data
     overflow. (nab)

   - Fix a target-core-user OOPs during device removal. (MNC + Bryant
     Ly)

   - Fix a long standing iscsi-target potential issue where kthread exit
     did not wait for kthread_should_stop(). (Jiang Yi)

   - Fix a iscsi-target v3.12.y regression OOPs involving initial login
     PDU processing during asynchronous TCP connection close. (MNC +
     nab)

  This is a little larger than usual for an -rc4, primarily due to the
  iscsi-target v3.12.y regression OOPs bug-fix.

  However, it's an important patch as MNC + Hannes were both able to
  trigger it using a reduced iscsi initiator login timeout combined with
  a backend taking a long time to complete I/Os during iscsi login
  driven session reinstatement"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  iscsi-target: Always wait for kthread_should_stop() before kthread exit
  iscsi-target: Fix initial login PDU asynchronous socket close OOPs
  tcmu: fix crash during device removal
  target: Re-add check to reject control WRITEs with overflow data
  ibmvscsis: Fix the incorrect req_lim_delta
  ibmvscsis: Clear left-over abort_cmd pointers
parents a3748463 5e0cf5e6
...@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) ...@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
cmd = list_first_entry_or_null(&vscsi->free_cmd, cmd = list_first_entry_or_null(&vscsi->free_cmd,
struct ibmvscsis_cmd, list); struct ibmvscsis_cmd, list);
if (cmd) { if (cmd) {
if (cmd->abort_cmd)
cmd->abort_cmd = NULL;
cmd->flags &= ~(DELAY_SEND); cmd->flags &= ~(DELAY_SEND);
list_del(&cmd->list); list_del(&cmd->list);
cmd->iue = iue; cmd->iue = iue;
...@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) ...@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
if (cmd->abort_cmd) { if (cmd->abort_cmd) {
retry = true; retry = true;
cmd->abort_cmd->flags &= ~(DELAY_SEND); cmd->abort_cmd->flags &= ~(DELAY_SEND);
cmd->abort_cmd = NULL;
} }
/* /*
...@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) ...@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
list_del(&cmd->list); list_del(&cmd->list);
ibmvscsis_free_cmd_resources(vscsi, ibmvscsis_free_cmd_resources(vscsi,
cmd); cmd);
/*
* With a successfully aborted op
* through LIO we want to increment the
* vscsi credit so that when we don't
* send a rsp to the original scsi abort
* op (h_send_crq), but the tm rsp to
* the abort is sent, the credit is
* correctly sent with the abort tm rsp.
* We would need 1 for the abort tm rsp
* and 1 credit for the aborted scsi op.
* Thus we need to increment here.
* Also we want to increment the credit
* here because we want to make sure
* cmd is actually released first
* otherwise the client will think it
* can send a new cmd, and we could
* find ourselves short of cmd elements.
*/
vscsi->credit += 1;
} else { } else {
iue = cmd->iue; iue = cmd->iue;
...@@ -2962,9 +2984,6 @@ static long srp_build_response(struct scsi_info *vscsi, ...@@ -2962,9 +2984,6 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->opcode = SRP_RSP; rsp->opcode = SRP_RSP;
if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
else
rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
rsp->tag = cmd->rsp.tag; rsp->tag = cmd->rsp.tag;
rsp->flags = 0; rsp->flags = 0;
......
...@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg) ...@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg)
{ {
int ret = 0; int ret = 0;
struct iscsi_conn *conn = arg; struct iscsi_conn *conn = arg;
bool conn_freed = false;
/* /*
* Allow ourselves to be interrupted by SIGINT so that a * Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally. * connection recovery / failure event can be triggered externally.
...@@ -3815,13 +3817,15 @@ int iscsi_target_tx_thread(void *arg) ...@@ -3815,13 +3817,15 @@ int iscsi_target_tx_thread(void *arg)
goto transport_err; goto transport_err;
ret = iscsit_handle_response_queue(conn); ret = iscsit_handle_response_queue(conn);
if (ret == 1) if (ret == 1) {
goto get_immediate; goto get_immediate;
else if (ret == -ECONNRESET) } else if (ret == -ECONNRESET) {
conn_freed = true;
goto out; goto out;
else if (ret < 0) } else if (ret < 0) {
goto transport_err; goto transport_err;
} }
}
transport_err: transport_err:
/* /*
...@@ -3830,8 +3834,13 @@ int iscsi_target_tx_thread(void *arg) ...@@ -3830,8 +3834,13 @@ int iscsi_target_tx_thread(void *arg)
* responsible for cleaning up the early connection failure. * responsible for cleaning up the early connection failure.
*/ */
if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
iscsit_take_action_for_connection_exit(conn); iscsit_take_action_for_connection_exit(conn, &conn_freed);
out: out:
if (!conn_freed) {
while (!kthread_should_stop()) {
msleep(100);
}
}
return 0; return 0;
} }
...@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg) ...@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg)
{ {
int rc; int rc;
struct iscsi_conn *conn = arg; struct iscsi_conn *conn = arg;
bool conn_freed = false;
/* /*
* Allow ourselves to be interrupted by SIGINT so that a * Allow ourselves to be interrupted by SIGINT so that a
...@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg) ...@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg)
*/ */
rc = wait_for_completion_interruptible(&conn->rx_login_comp); rc = wait_for_completion_interruptible(&conn->rx_login_comp);
if (rc < 0 || iscsi_target_check_conn_state(conn)) if (rc < 0 || iscsi_target_check_conn_state(conn))
return 0; goto out;
if (!conn->conn_transport->iscsit_get_rx_pdu) if (!conn->conn_transport->iscsit_get_rx_pdu)
return 0; return 0;
...@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg) ...@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg)
if (!signal_pending(current)) if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1); atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn); iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
if (!conn_freed) {
while (!kthread_should_stop()) {
msleep(100);
}
}
return 0; return 0;
} }
......
...@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) ...@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
} }
} }
void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
{ {
*conn_freed = false;
spin_lock_bh(&conn->state_lock); spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) { if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
...@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) ...@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
iscsit_close_connection(conn); iscsit_close_connection(conn);
*conn_freed = true;
return; return;
} }
...@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) ...@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
spin_unlock_bh(&conn->state_lock); spin_unlock_bh(&conn->state_lock);
iscsit_handle_connection_cleanup(conn); iscsit_handle_connection_cleanup(conn);
*conn_freed = true;
} }
...@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); ...@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
extern void iscsit_fall_back_to_erl0(struct iscsi_session *); extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
#endif /*** ISCSI_TARGET_ERL0_H ***/ #endif /*** ISCSI_TARGET_ERL0_H ***/
...@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg) ...@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
break; break;
} }
while (!kthread_should_stop()) {
msleep(100);
}
return 0; return 0;
} }
...@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) ...@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
static bool iscsi_target_sk_state_check(struct sock *sk) static bool __iscsi_target_sk_check_close(struct sock *sk)
{ {
if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
"returning FALSE\n"); "returning FALSE\n");
return true;
}
return false; return false;
}
static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
read_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
read_unlock_bh(&sk->sk_callback_lock);
} }
return true; return state;
}
static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
read_lock_bh(&sk->sk_callback_lock);
state = test_bit(flag, &conn->login_flags);
read_unlock_bh(&sk->sk_callback_lock);
}
return state;
}
static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
{
bool state = false;
if (conn->sock) {
struct sock *sk = conn->sock->sk;
write_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
if (!state)
clear_bit(flag, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
return state;
} }
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
...@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) ...@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
conn, current->comm, current->pid); conn, current->comm, current->pid);
/*
* If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
* before initial PDU processing in iscsi_target_start_negotiation()
* has completed, go ahead and retry until it's cleared.
*
* Otherwise if the TCP connection drops while this is occurring,
* iscsi_target_start_negotiation() will detect the failure, call
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
* remaining iscsi connection resources from iscsi_np process context.
*/
if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
return;
}
spin_lock(&tpg->tpg_state_lock); spin_lock(&tpg->tpg_state_lock);
state = (tpg->tpg_state == TPG_STATE_ACTIVE); state = (tpg->tpg_state == TPG_STATE_ACTIVE);
...@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) ...@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
if (!state) { if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
iscsi_target_restore_sock_callbacks(conn); goto err;
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
} }
if (conn->sock) { if (iscsi_target_sk_check_close(conn)) {
struct sock *sk = conn->sock->sk;
read_lock_bh(&sk->sk_callback_lock);
state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock);
if (!state) {
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
iscsi_target_restore_sock_callbacks(conn); goto err;
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
}
} }
conn->login_kworker = current; conn->login_kworker = current;
...@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) ...@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
flush_signals(current); flush_signals(current);
conn->login_kworker = NULL; conn->login_kworker = NULL;
if (rc < 0) { if (rc < 0)
iscsi_target_restore_sock_callbacks(conn); goto err;
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
}
pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
conn, current->comm, current->pid); conn, current->comm, current->pid);
rc = iscsi_target_do_login(conn, login); rc = iscsi_target_do_login(conn, login);
if (rc < 0) { if (rc < 0) {
iscsi_target_restore_sock_callbacks(conn); goto err;
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
} else if (!rc) { } else if (!rc) {
if (conn->sock) { if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
struct sock *sk = conn->sock->sk; goto err;
write_lock_bh(&sk->sk_callback_lock);
clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
} else if (rc == 1) { } else if (rc == 1) {
iscsi_target_nego_release(conn); iscsi_target_nego_release(conn);
iscsi_post_login_handler(np, conn, zero_tsih); iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np); iscsit_deaccess_np(np, tpg, tpg_np);
} }
return;
err:
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
} }
static void iscsi_target_do_cleanup(struct work_struct *work) static void iscsi_target_do_cleanup(struct work_struct *work)
...@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) ...@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
orig_state_change(sk); orig_state_change(sk);
return; return;
} }
state = __iscsi_target_sk_check_close(sk);
pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
" conn: %p\n", conn); " conn: %p\n", conn);
if (state)
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock); write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk); orig_state_change(sk);
return; return;
} }
if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
conn); conn);
write_unlock_bh(&sk->sk_callback_lock); write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk); orig_state_change(sk);
return; return;
} }
/*
state = iscsi_target_sk_state_check(sk); * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
* but only queue conn->login_work -> iscsi_target_do_login_rx()
* processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
*
* When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
* will detect the dropped TCP connection from delayed workqueue context.
*
* If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
* iscsi_target_start_negotiation() is running, iscsi_target_do_login()
* via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
* via iscsi_target_sk_check_and_clear() is responsible for detecting the
* dropped TCP connection in iscsi_np process context, and cleaning up
* the remaining iscsi connection resources.
*/
if (state) {
pr_debug("iscsi_target_sk_state_change got failed state\n");
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock); write_unlock_bh(&sk->sk_callback_lock);
pr_debug("iscsi_target_sk_state_change: state: %d\n", state); orig_state_change(sk);
if (!state) { if (!state)
pr_debug("iscsi_target_sk_state_change got failed state\n"); schedule_delayed_work(&conn->login_work, 0);
schedule_delayed_work(&conn->login_cleanup_work, 0);
return; return;
} }
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk); orig_state_change(sk);
} }
...@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo ...@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
if (iscsi_target_handle_csg_one(conn, login) < 0) if (iscsi_target_handle_csg_one(conn, login) < 0)
return -1; return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
/*
* Check to make sure the TCP connection has not
* dropped asynchronously while session reinstatement
* was occurring in this kthread context, before
* transitioning to full feature phase operation.
*/
if (iscsi_target_sk_check_close(conn))
return -1;
login->tsih = conn->sess->tsih; login->tsih = conn->sess->tsih;
login->login_complete = 1; login->login_complete = 1;
iscsi_target_restore_sock_callbacks(conn); iscsi_target_restore_sock_callbacks(conn);
...@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo ...@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
break; break;
} }
if (conn->sock) {
struct sock *sk = conn->sock->sk;
bool state;
read_lock_bh(&sk->sk_callback_lock);
state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock);
if (!state) {
pr_debug("iscsi_target_do_login() failed state for"
" conn: %p\n", conn);
return -1;
}
}
return 0; return 0;
} }
...@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation( ...@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
write_lock_bh(&sk->sk_callback_lock); write_lock_bh(&sk->sk_callback_lock);
set_bit(LOGIN_FLAGS_READY, &conn->login_flags); set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock); write_unlock_bh(&sk->sk_callback_lock);
} }
/*
* If iscsi_target_do_login returns zero to signal more PDU
* exchanges are required to complete the login, go ahead and
* clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
* is still active.
*
* Otherwise if TCP connection dropped asynchronously, go ahead
* and perform connection cleanup now.
*/
ret = iscsi_target_do_login(conn, login); ret = iscsi_target_do_login(conn, login);
if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
ret = -1;
if (ret < 0) { if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work); cancel_delayed_work_sync(&conn->login_cleanup_work);
......
...@@ -1160,16 +1160,29 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) ...@@ -1160,16 +1160,29 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->unknown_data_length) { if (cmd->unknown_data_length) {
cmd->data_length = size; cmd->data_length = size;
} else if (size != cmd->data_length) { } else if (size != cmd->data_length) {
pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:" " %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->get_fabric_name(), " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]); cmd->data_length, size, cmd->t_task_cdb[0]);
if (cmd->data_direction == DMA_TO_DEVICE && if (cmd->data_direction == DMA_TO_DEVICE) {
cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
pr_err("Rejecting underflow/overflow WRITE data\n"); pr_err_ratelimited("Rejecting underflow/overflow"
" for WRITE data CDB\n");
return TCM_INVALID_CDB_FIELD; return TCM_INVALID_CDB_FIELD;
} }
/*
* Some fabric drivers like iscsi-target still expect to
* always reject overflow writes. Reject this case until
* full fabric driver level support for overflow writes
* is introduced tree-wide.
*/
if (size > cmd->data_length) {
pr_err_ratelimited("Rejecting overflow for"
" WRITE control CDB\n");
return TCM_INVALID_CDB_FIELD;
}
}
/* /*
* Reject READ_* or WRITE_* with overflow/underflow for * Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_CDB. * type SCF_SCSI_DATA_CDB.
......
...@@ -97,7 +97,7 @@ struct tcmu_hba { ...@@ -97,7 +97,7 @@ struct tcmu_hba {
struct tcmu_dev { struct tcmu_dev {
struct list_head node; struct list_head node;
struct kref kref;
struct se_device se_dev; struct se_device se_dev;
char *name; char *name;
...@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) ...@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
if (!udev) if (!udev)
return NULL; return NULL;
kref_init(&udev->kref);
udev->name = kstrdup(name, GFP_KERNEL); udev->name = kstrdup(name, GFP_KERNEL);
if (!udev->name) { if (!udev->name) {
...@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode) ...@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
return 0; return 0;
} }
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct tcmu_dev *udev = TCMU_DEV(dev);
kfree(udev->uio_info.name);
kfree(udev->name);
kfree(udev);
}
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
static int tcmu_release(struct uio_info *info, struct inode *inode) static int tcmu_release(struct uio_info *info, struct inode *inode)
{ {
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
...@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) ...@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
pr_debug("close\n"); pr_debug("close\n");
/* release ref from configure */
kref_put(&udev->kref, tcmu_dev_kref_release);
return 0; return 0;
} }
...@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev) ...@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = 128; dev->dev_attrib.hw_max_sectors = 128;
dev->dev_attrib.hw_queue_depth = 128; dev->dev_attrib.hw_queue_depth = 128;
/*
* Get a ref incase userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
*/
kref_get(&udev->kref);
ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
udev->uio_info.uio_dev->minor); udev->uio_info.uio_dev->minor);
if (ret) if (ret)
...@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev) ...@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
return 0; return 0;
err_netlink: err_netlink:
kref_put(&udev->kref, tcmu_dev_kref_release);
uio_unregister_device(&udev->uio_info); uio_unregister_device(&udev->uio_info);
err_register: err_register:
vfree(udev->mb_addr); vfree(udev->mb_addr);
err_vzalloc: err_vzalloc:
kfree(info->name); kfree(info->name);
info->name = NULL;
return ret; return ret;
} }
...@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) ...@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
return -EINVAL; return -EINVAL;
} }
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct tcmu_dev *udev = TCMU_DEV(dev);
kfree(udev);
}
static bool tcmu_dev_configured(struct tcmu_dev *udev) static bool tcmu_dev_configured(struct tcmu_dev *udev)
{ {
return udev->uio_info.uio_dev ? true : false; return udev->uio_info.uio_dev ? true : false;
...@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev) ...@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
udev->uio_info.uio_dev->minor); udev->uio_info.uio_dev->minor);
uio_unregister_device(&udev->uio_info); uio_unregister_device(&udev->uio_info);
kfree(udev->uio_info.name);
kfree(udev->name);
} }
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
/* release ref from init */
kref_put(&udev->kref, tcmu_dev_kref_release);
} }
enum { enum {
......
...@@ -557,6 +557,7 @@ struct iscsi_conn { ...@@ -557,6 +557,7 @@ struct iscsi_conn {
#define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2 #define LOGIN_FLAGS_CLOSED 2
#define LOGIN_FLAGS_READY 4 #define LOGIN_FLAGS_READY 4
#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags; unsigned long login_flags;
struct delayed_work login_work; struct delayed_work login_work;
struct delayed_work login_cleanup_work; struct delayed_work login_cleanup_work;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment