Commit 412eeafa authored by Mike Christie, committed by James Bottomley

[SCSI] iser: Modify iser to take an iscsi_endpoint struct in ep callouts and session setup

This hooks iser into the iscsi endpoint code. Previously, iser handled the
endpoint lookup and allocation itself; that code has been made generic so
bnx2i and iser can share it. It also lets userspace pass iser the leading
conn's ep, so we know the ib_device being used and can set it as the
scsi_host's parent. That in turn allows scsi-ml to set the dma_mask based
on that device.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent d82ff9be
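
At a glance, the converted session-setup path looks roughly like the sketch
below. This is a condensed summary of the hunks that follow (error handling
and unrelated host setup trimmed), not a verbatim copy of the new
iscsi_iser_session_create():

	/* Condensed sketch of the new session-create path; see the real
	 * iscsi_iser_session_create() hunk below for the full version. */
	struct iscsi_endpoint *ep;	/* leading conn's ep, passed in by userspace */
	struct iser_conn *ib_conn = NULL;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);

	/* Older userspace tools (before 2.0-870) do not pass an ep, so the
	 * parent device falls back to NULL in that case. */
	if (ep)
		ib_conn = ep->dd_data;

	/* Parenting the Scsi_Host to the IB device's dma_device is what
	 * lets scsi-ml derive the dma_mask from the real hardware. */
	if (iscsi_host_add(shost,
			   ep ? ib_conn->device->ib_device->dma_device : NULL))
		goto free_host;	/* free_host: label in the real function */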
@@ -262,24 +262,6 @@ iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 	}
 }
 
-static struct iser_conn *
-iscsi_iser_ib_conn_lookup(__u64 ep_handle)
-{
-	struct iser_conn *ib_conn;
-	struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
-
-	mutex_lock(&ig.connlist_mutex);
-	list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
-		if (ib_conn == uib_conn) {
-			mutex_unlock(&ig.connlist_mutex);
-			return ib_conn;
-		}
-	}
-	mutex_unlock(&ig.connlist_mutex);
-	iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
-	return NULL;
-}
-
 static struct iscsi_cls_conn *
 iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 {
@@ -335,6 +317,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 	int error;
 
 	error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
@@ -343,12 +326,14 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 
 	/* the transport ep handle comes from user space so it must be
 	 * verified against the global ib connections list */
-	ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
-	if (!ib_conn) {
+	ep = iscsi_lookup_endpoint(transport_eph);
+	if (!ep) {
 		iser_err("can't bind eph %llx\n",
 			 (unsigned long long)transport_eph);
 		return -EINVAL;
 	}
+	ib_conn = ep->dd_data;
+
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
@@ -401,21 +386,17 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
 }
 
 static struct iscsi_cls_session *
-iscsi_iser_session_create(struct Scsi_Host *shost,
+iscsi_iser_session_create(struct iscsi_endpoint *ep,
 			  uint16_t cmds_max, uint16_t qdepth,
 			  uint32_t initial_cmdsn, uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
+	struct Scsi_Host *shost;
 	int i;
 	struct iscsi_task *task;
 	struct iscsi_iser_task *iser_task;
+	struct iser_conn *ib_conn;
 
-	if (shost) {
-		printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
-		       shost->host_no);
-		return NULL;
-	}
-
 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
 	if (!shost)
@@ -426,7 +407,15 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	shost->max_channel = 0;
 	shost->max_cmd_len = 16;
-	if (iscsi_host_add(shost, NULL))
+	/*
+	 * older userspace tools (before 2.0-870) did not pass us
+	 * the leading conn's ep so this will be NULL;
+	 */
+	if (ep)
+		ib_conn = ep->dd_data;
+
+	if (iscsi_host_add(shost,
+			   ep ? ib_conn->device->ib_device->dma_device : NULL))
 		goto free_host;
 	*hostno = shost->host_no;
@@ -529,34 +518,37 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 	stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
-static int
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
-		      __u64 *ep_handle)
+static struct iscsi_endpoint *
+iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
 {
 	int err;
 	struct iser_conn *ib_conn;
+	struct iscsi_endpoint *ep;
 
-	err = iser_conn_init(&ib_conn);
-	if (err)
-		goto out;
+	ep = iscsi_create_endpoint(sizeof(*ib_conn));
+	if (!ep)
+		return ERR_PTR(-ENOMEM);
 
-	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
-	if (!err)
-		*ep_handle = (__u64)(unsigned long)ib_conn;
+	ib_conn = ep->dd_data;
+	ib_conn->ep = ep;
+	iser_conn_init(ib_conn);
 
-out:
-	return err;
+	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+			   non_blocking);
+	if (err) {
+		iscsi_destroy_endpoint(ep);
+		return ERR_PTR(err);
+	}
+	return ep;
 }
 
 static int
-iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
-	struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+	struct iser_conn *ib_conn;
 	int rc;
 
-	if (!ib_conn)
-		return -EINVAL;
-
+	ib_conn = ep->dd_data;
 	rc = wait_event_interruptible_timeout(ib_conn->wait,
 			     ib_conn->state == ISER_CONN_UP,
 			     msecs_to_jiffies(timeout_ms));
@@ -578,14 +570,11 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
 }
 
 static void
-iscsi_iser_ep_disconnect(__u64 ep_handle)
+iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 {
 	struct iser_conn *ib_conn;
 
-	ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
-	if (!ib_conn)
-		return;
-
+	ib_conn = ep->dd_data;
 	if (ib_conn->iser_conn)
 		/*
 		 * Must suspend xmit path if the ep is bound to the
...
@@ -174,6 +174,7 @@ struct iser_data_buf {
 struct iser_device;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
+struct iscsi_endpoint;
 
 struct iser_mem_reg {
 	u32  lkey;
@@ -241,6 +242,7 @@ struct iser_device {
 
 struct iser_conn {
 	struct iscsi_iser_conn       *iser_conn; /* iser conn for upcalls  */
+	struct iscsi_endpoint	     *ep;
 	enum iser_ib_conn_state	     state;	 /* rdma connection state          */
 	atomic_t		     refcount;
 	spinlock_t		     lock;	 /* used for state changes         */
@@ -313,7 +315,7 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
 		     char *rx_data,
 		     int rx_data_len);
 
-int  iser_conn_init(struct iser_conn **ib_conn);
+void iser_conn_init(struct iser_conn *ib_conn);
 void iser_conn_get(struct iser_conn *ib_conn);
...
@@ -325,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
 	iser_device_try_release(device);
 	if (ib_conn->iser_conn)
 		ib_conn->iser_conn->ib_conn = NULL;
-	kfree(ib_conn);
+	iscsi_destroy_endpoint(ib_conn->ep);
 }
 
 void iser_conn_get(struct iser_conn *ib_conn)
@@ -494,15 +494,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 	return ret;
 }
 
-int iser_conn_init(struct iser_conn **ibconn)
+void iser_conn_init(struct iser_conn *ib_conn)
 {
-	struct iser_conn *ib_conn;
-
-	ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
-	if (!ib_conn) {
-		iser_err("can't alloc memory for struct iser_conn\n");
-		return -ENOMEM;
-	}
 	ib_conn->state = ISER_CONN_INIT;
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
@@ -510,9 +503,6 @@ int iser_conn_init(struct iser_conn **ibconn)
 	atomic_set(&ib_conn->refcount, 1);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
-
-	*ibconn = ib_conn;
-	return 0;
 }
 
 /**
...
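
For context (not shown in this diff): the converted callouts are plugged into
iser's iscsi_transport template elsewhere in iscsi_iser.c. A rough sketch is
below; the template field names are assumed from the iscsi_transport layout of
this kernel era rather than taken from the hunks above.

	/* Sketch only: wiring of the new endpoint-based callouts. */
	static struct iscsi_transport iscsi_iser_transport = {
		.owner		= THIS_MODULE,
		.name		= "iser",
		/* ... */
		.create_session	= iscsi_iser_session_create,	/* now takes a struct iscsi_endpoint * */
		.ep_connect	= iscsi_iser_ep_connect,	/* now returns a struct iscsi_endpoint * */
		.ep_poll	= iscsi_iser_ep_poll,
		.ep_disconnect	= iscsi_iser_ep_disconnect,
	};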