Commit f8457d58 authored by Doug Ledford

Merge branch 'bart-srpt-for-next' into k.o/wip/dl-for-next

Merging in 12 patch series from Bart that required changes in the
current for-rc branch in order to apply cleanly.
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents c4b76d8d 2d67017c
@@ -293,7 +293,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 }
 #endif
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
...
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
 return 0;
 }
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
 {
 struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
 return NULL;
 }
+/*
+ * Caller is responsible to return refrerence count by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+struct ib_device *device;
+down_read(&lists_rwsem);
+device = __ib_device_get_by_index(index);
+if (device)
+get_device(&device->dev);
+up_read(&lists_rwsem);
+return device;
+}
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
 struct ib_device *device;
...
@@ -150,27 +150,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-device = __ib_device_get_by_index(index);
+device = ib_device_get_by_index(index);
 if (!device)
 return -EINVAL;
 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-if (!msg)
-return -ENOMEM;
+if (!msg) {
+err = -ENOMEM;
+goto err;
+}
 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 0, 0);
 err = fill_dev_info(msg, device);
-if (err) {
-nlmsg_free(msg);
-return err;
-}
+if (err)
+goto err_free;
 nlmsg_end(msg, nlh);
+put_device(&device->dev);
 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+err_free:
+nlmsg_free(msg);
+err:
+put_device(&device->dev);
+return err;
 }
 static int _nldev_get_dumpit(struct ib_device *device,
@@ -228,31 +235,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 return -EINVAL;
 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-device = __ib_device_get_by_index(index);
+device = ib_device_get_by_index(index);
 if (!device)
 return -EINVAL;
 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
-if (!rdma_is_port_valid(device, port))
-return -EINVAL;
+if (!rdma_is_port_valid(device, port)) {
+err = -EINVAL;
+goto err;
+}
 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-if (!msg)
-return -ENOMEM;
+if (!msg) {
+err = -ENOMEM;
+goto err;
+}
 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 0, 0);
 err = fill_port_info(msg, device, port);
-if (err) {
-nlmsg_free(msg);
-return err;
-}
+if (err)
+goto err_free;
 nlmsg_end(msg, nlh);
+put_device(&device->dev);
 return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+err_free:
+nlmsg_free(msg);
+err:
+put_device(&device->dev);
+return err;
 }
 static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -273,7 +289,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 return -EINVAL;
 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-device = __ib_device_get_by_index(ifindex);
+device = ib_device_get_by_index(ifindex);
 if (!device)
 return -EINVAL;
@@ -307,7 +323,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 nlmsg_end(skb, nlh);
 }
-out: cb->args[0] = idx;
+out:
+put_device(&device->dev);
+cb->args[0] = idx;
 return skb->len;
 }
...
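The nldev hunks above switch all three handlers from the unlocked __ib_device_get_by_index() to the reference-taking ib_device_get_by_index(), so every exit path now has to drop the reference with put_device(). A minimal sketch of that acquire/use/release pattern follows; it uses only the two calls introduced above, but the wrapper function example_query() is hypothetical and not part of the patch:

/* Illustrative only: take a device reference, use it, release it on every path. */
static int example_query(u32 index)
{
        struct ib_device *device;
        int err = 0;

        device = ib_device_get_by_index(index); /* takes a reference on success */
        if (!device)
                return -EINVAL;

        /* ... build and send the response here; set err on failure ... */

        put_device(&device->dev);               /* drop the reference */
        return err;
}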
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 goto err_free_mr;
 mr->max_pages = max_num_sg;
 err = mlx4_mr_enable(dev->dev, &mr->mmr);
 if (err)
 goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 return &mr->ibmr;
 err_free_pl:
+mr->ibmr.device = pd->device;
 mlx4_free_priv_pages(mr);
 err_free_mr:
 (void) mlx4_mr_free(dev->dev, &mr->mmr);
...
@@ -926,7 +926,7 @@ static int path_rec_start(struct net_device *dev,
 return 0;
 }
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
 struct net_device *dev)
 {
 struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -941,7 +941,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 spin_unlock_irqrestore(&priv->lock, flags);
 ++dev->stats.tx_dropped;
 dev_kfree_skb_any(skb);
-return;
+return NULL;
+}
+/* To avoid race condition, make sure that the
+ * neigh will be added only once.
+ */
+if (unlikely(!list_empty(&neigh->list))) {
+spin_unlock_irqrestore(&priv->lock, flags);
+return neigh;
 }
 path = __path_find(dev, daddr + 4);
@@ -980,7 +988,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 path->ah->last_send = rn->send(dev, skb, path->ah->ah,
 IPOIB_QPN(daddr));
 ipoib_neigh_put(neigh);
-return;
+return NULL;
 }
 } else {
 neigh->ah = NULL;
@@ -997,7 +1005,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 spin_unlock_irqrestore(&priv->lock, flags);
 ipoib_neigh_put(neigh);
-return;
+return NULL;
 err_path:
 ipoib_neigh_free(neigh);
@@ -1007,6 +1015,8 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 spin_unlock_irqrestore(&priv->lock, flags);
 ipoib_neigh_put(neigh);
+return NULL;
 }
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1127,7 +1137,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 case htons(ETH_P_TIPC):
 neigh = ipoib_neigh_get(dev, phdr->hwaddr);
 if (unlikely(!neigh)) {
-neigh_add_path(skb, phdr->hwaddr, dev);
+neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+if (likely(!neigh))
 return NETDEV_TX_OK;
 }
 break;
...
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 spin_lock_irqsave(&priv->lock, flags);
 if (!neigh) {
 neigh = ipoib_neigh_alloc(daddr, dev);
-if (neigh) {
+/* Make sure that the neigh will be added only
+ * once to mcast list.
+ */
+if (neigh && list_empty(&neigh->list)) {
 kref_get(&mcast->ah->ref);
 neigh->ah = mcast->ah;
 list_add_tail(&neigh->list, &mcast->neigh_list);
...
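Both ipoib hunks above rely on the same guard: a neigh whose list head was set up with INIT_LIST_HEAD() and never linked reports list_empty() as true, so checking it under priv->lock keeps the same entry from being added twice. A hedged sketch of that idiom using the list and spinlock APIs visible in the diff; example_link_once() is illustrative and not a function from the patch:

/* Illustrative only: link a neigh into a list at most once, under the caller's lock. */
static void example_link_once(struct ipoib_neigh *neigh,
                              struct list_head *head, spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (list_empty(&neigh->list))   /* not linked anywhere yet */
                list_add_tail(&neigh->list, head);
        spin_unlock_irqrestore(lock, flags);
}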
@@ -120,7 +120,9 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
 }
 /**
- * srpt_event_handler() - Asynchronous IB event callback function.
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
  *
  * Callback function called by the InfiniBand core when an asynchronous IB
  * event occurs. This callback may occur in interrupt context. See also
@@ -132,6 +134,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
 {
 struct srpt_device *sdev;
 struct srpt_port *sport;
+u8 port_num;
 sdev = ib_get_client_data(event->device, &srpt_client);
 if (!sdev || sdev->device != event->device)
@@ -142,10 +145,15 @@ static void srpt_event_handler(struct ib_event_handler *handler,
 switch (event->event) {
 case IB_EVENT_PORT_ERR:
-if (event->element.port_num <= sdev->device->phys_port_cnt) {
-sport = &sdev->port[event->element.port_num - 1];
+port_num = event->element.port_num - 1;
+if (port_num < sdev->device->phys_port_cnt) {
+sport = &sdev->port[port_num];
 sport->lid = 0;
 sport->sm_lid = 0;
+} else {
+WARN(true, "event %d: port_num %d out of range 1..%d\n",
+event->event, port_num + 1,
+sdev->device->phys_port_cnt);
 }
 break;
 case IB_EVENT_PORT_ACTIVE:
@@ -155,25 +163,31 @@ static void srpt_event_handler(struct ib_event_handler *handler,
 case IB_EVENT_CLIENT_REREGISTER:
 case IB_EVENT_GID_CHANGE:
 /* Refresh port data asynchronously. */
-if (event->element.port_num <= sdev->device->phys_port_cnt) {
-sport = &sdev->port[event->element.port_num - 1];
+port_num = event->element.port_num - 1;
+if (port_num < sdev->device->phys_port_cnt) {
+sport = &sdev->port[port_num];
 if (!sport->lid && !sport->sm_lid)
 schedule_work(&sport->work);
+} else {
+WARN(true, "event %d: port_num %d out of range 1..%d\n",
+event->event, port_num + 1,
+sdev->device->phys_port_cnt);
 }
 break;
 default:
-pr_err("received unrecognized IB event %d\n",
-event->event);
+pr_err("received unrecognized IB event %d\n", event->event);
 break;
 }
 }
 /**
- * srpt_srq_event() - SRQ event callback function.
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
  */
 static void srpt_srq_event(struct ib_event *event, void *ctx)
 {
-pr_info("SRQ event %d\n", event->event);
+pr_debug("SRQ event %d\n", event->event);
 }
 static const char *get_ch_state_name(enum rdma_ch_state s)
@@ -194,7 +208,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
 }
 /**
- * srpt_qp_event() - QP event callback function.
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
  */
 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 {
@@ -217,8 +233,8 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 }
 /**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
+ * srpt_set_ioc - initialize a IOUnitInfo structure
+ * @c_list: controller list.
  * @slot: one-based slot number.
  * @value: four-bit value.
  *
@@ -241,7 +257,8 @@ static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
 }
 /**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
  *
  * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
  * Specification.
@@ -260,7 +277,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
 }
 /**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
  *
  * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
@@ -284,7 +302,10 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
 }
 /**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ * srpt_get_ioc - write IOControllerprofile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
  *
  * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
@@ -314,7 +335,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 if (sdev->use_srq)
 send_queue_depth = sdev->srq_size;
 else
-send_queue_depth = min(SRPT_RQ_SIZE,
+send_queue_depth = min(MAX_SRPT_RQ_SIZE,
 sdev->device->attrs.max_qp_wr);
 memset(iocp, 0, sizeof(*iocp));
@@ -342,7 +363,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 }
 /**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply..
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
  *
  * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
@@ -379,8 +405,8 @@ static void srpt_get_svc_entries(u64 ioc_guid,
 }
 /**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
  * @rq_mad: received MAD.
  * @rsp_mad: response MAD.
  */
@@ -419,7 +445,9 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
 }
 /**
- * srpt_mad_send_handler() - Post MAD-send callback function.
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
  */
 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
 struct ib_mad_send_wc *mad_wc)
@@ -429,7 +457,10 @@ static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
 }
 /**
- * srpt_mad_recv_handler() - MAD reception callback function.
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
  */
 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
 struct ib_mad_send_buf *send_buf,
@@ -493,8 +524,18 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
 ib_free_recv_mad(mad_wc);
 }
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+const __be16 *g = (const __be16 *)guid;
+return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
 /**
- * srpt_refresh_port() - Configure a HCA port.
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
  *
  * Enable InfiniBand management datagram processing, update the cached sm_lid,
  * lid and gid values, and register a callback function for processing MADs
@@ -507,7 +548,6 @@ static int srpt_refresh_port(struct srpt_port *sport)
 struct ib_mad_reg_req reg_req;
 struct ib_port_modify port_modify;
 struct ib_port_attr port_attr;
-__be16 *guid;
 int ret;
 memset(&port_modify, 0, sizeof(port_modify));
@@ -531,11 +571,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
 goto err_query_port;
 sport->port_guid_wwn.priv = sport;
-guid = (__be16 *)&sport->gid.global.interface_id;
-snprintf(sport->port_guid, sizeof(sport->port_guid),
-"%04x:%04x:%04x:%04x",
-be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
-be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+&sport->gid.global.interface_id);
 sport->port_gid_wwn.priv = sport;
 snprintf(sport->port_gid, sizeof(sport->port_gid),
 "0x%016llx%016llx",
@@ -577,7 +614,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
 }
 /**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
  *
  * Note: It is safe to call this function more than once for the same device.
  */
@@ -602,7 +640,11 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
 }
 /**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ * srpt_alloc_ioctx - allocate a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
  */
 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
 int ioctx_size, int dma_size,
@@ -633,7 +675,11 @@ static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
 }
 /**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ * srpt_free_ioctx - free a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
  */
 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
 int dma_size, enum dma_data_direction dir)
@@ -647,7 +693,7 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
 }
 /**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
  * @sdev: Device to allocate the I/O context ring for.
  * @ring_size: Number of elements in the I/O context ring.
  * @ioctx_size: I/O context size.
@@ -685,7 +731,12 @@ static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
 }
 /**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
  */
 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
 struct srpt_device *sdev, int ring_size,
@@ -702,23 +753,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
 }
 /**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
-enum srpt_command_state state;
-unsigned long flags;
-BUG_ON(!ioctx);
-spin_lock_irqsave(&ioctx->spinlock, flags);
-state = ioctx->state;
-spin_unlock_irqrestore(&ioctx->spinlock, flags);
-return state;
-}
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
  *
  * Does not modify the state of aborted commands. Returns the previous command
  * state.
@@ -727,21 +764,19 @@ static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
 enum srpt_command_state new)
 {
 enum srpt_command_state previous;
-unsigned long flags;
-BUG_ON(!ioctx);
-spin_lock_irqsave(&ioctx->spinlock, flags);
 previous = ioctx->state;
 if (previous != SRPT_STATE_DONE)
 ioctx->state = new;
-spin_unlock_irqrestore(&ioctx->spinlock, flags);
 return previous;
 }
 /**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
  *
  * Returns true if and only if the previous command state was equal to 'old'.
  */
@@ -750,22 +785,23 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
 enum srpt_command_state new)
 {
 enum srpt_command_state previous;
-unsigned long flags;
 WARN_ON(!ioctx);
 WARN_ON(old == SRPT_STATE_DONE);
 WARN_ON(new == SRPT_STATE_NEW);
-spin_lock_irqsave(&ioctx->spinlock, flags);
 previous = ioctx->state;
 if (previous == old)
 ioctx->state = new;
-spin_unlock_irqrestore(&ioctx->spinlock, flags);
 return previous == old;
 }
 /**
- * srpt_post_recv() - Post an IB receive request.
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
  */
 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
 struct srpt_recv_ioctx *ioctx)
@@ -791,7 +827,8 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
 }
 /**
- * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
  *
  * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
@@ -819,7 +856,8 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
 schedule_work(&ch->release_work);
 else
-WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+pr_debug("%s-%d: already disconnected.\n",
+ch->sess_name, ch->qp->qp_num);
 }
 }
@@ -928,11 +966,13 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
 }
 /**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
  * @ioctx: Pointer to the I/O context associated with the request.
  * @srp_cmd: Pointer to the SRP_CMD request data.
  * @dir: Pointer to the variable to which the transfer direction will be
  * written.
+ * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
  * @data_len: Pointer to the variable to which the total data length of all
 * descriptors in the SRP_CMD request will be written.
  *
@@ -998,7 +1038,9 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 }
 /**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
  *
  * Initialized the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
@@ -1013,8 +1055,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 return -ENOMEM;
 attr->qp_state = IB_QPS_INIT;
-attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
-IB_ACCESS_REMOTE_WRITE;
+attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
 attr->port_num = ch->sport->port;
 attr->pkey_index = 0;
@@ -1027,7 +1068,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 }
 /**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
  * @ch: channel of the queue pair.
  * @qp: queue pair to change the state of.
  *
@@ -1057,7 +1098,7 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 }
 /**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
  * @ch: channel of the queue pair.
  * @qp: queue pair to change the state of.
  *
@@ -1087,7 +1128,8 @@ static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
 }
 /**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
  */
 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
 {
@@ -1098,7 +1140,8 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
 }
 /**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
@@ -1120,11 +1163,9 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 return ioctx;
 BUG_ON(ioctx->ch != ch);
-spin_lock_init(&ioctx->spinlock);
 ioctx->state = SRPT_STATE_NEW;
 ioctx->n_rdma = 0;
 ioctx->n_rw_ctx = 0;
-init_completion(&ioctx->tx_done);
 ioctx->queue_status_only = false;
 /*
  * transport_init_se_cmd() does not initialize all fields, so do it
@@ -1137,14 +1178,12 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 }
 /**
- * srpt_abort_cmd() - Abort a SCSI command.
+ * srpt_abort_cmd - abort a SCSI command
  * @ioctx: I/O context associated with the SCSI command.
- * @context: Preferred execution context.
  */
 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 {
 enum srpt_command_state state;
-unsigned long flags;
 BUG_ON(!ioctx);
@@ -1153,7 +1192,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
  * the ib_srpt driver, change the state to the next state.
  */
-spin_lock_irqsave(&ioctx->spinlock, flags);
 state = ioctx->state;
 switch (state) {
 case SRPT_STATE_NEED_DATA:
@@ -1168,7 +1206,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 __func__, state);
 break;
 }
-spin_unlock_irqrestore(&ioctx->spinlock, flags);
 pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
 ioctx->state, ioctx->cmd.tag);
@@ -1207,6 +1244,10 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 }
 /**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
  * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
@@ -1234,11 +1275,11 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 target_execute_cmd(&ioctx->cmd);
 else
 pr_err("%s[%d]: wrong state = %d\n", __func__,
-__LINE__, srpt_get_cmd_state(ioctx));
+__LINE__, ioctx->state);
 }
 /**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * srpt_build_cmd_rsp - build a SRP_RSP response
  * @ch: RDMA channel through which the request has been received.
  * @ioctx: I/O context associated with the SRP_CMD request. The response will
 * be built in the buffer ioctx->buf points at and hence this function will
@@ -1298,7 +1339,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
 }
 /**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * srpt_build_tskmgmt_rsp - build a task management response
  * @ch: RDMA channel through which the request has been received.
  * @ioctx: I/O context in which the SRP_RSP response will be built.
  * @rsp_code: RSP_CODE that will be stored in the response.
@@ -1346,7 +1387,10 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
 }
 /**
- * srpt_handle_cmd() - Process SRP_CMD.
+ * srpt_handle_cmd - process a SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
  */
 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
 struct srpt_recv_ioctx *recv_ioctx,
@@ -1428,7 +1472,10 @@ static int srp_tmr_to_tcm(int fn)
 }
 /**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
  *
  * Returns 0 if and only if the request will be processed by the target core.
  *
@@ -1471,9 +1518,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 }
 /**
- * srpt_handle_new_iu() - Process a newly received information unit.
+ * srpt_handle_new_iu - process a newly received information unit
  * @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
+ * @send_ioctx: Send I/O context.
  */
 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
 struct srpt_recv_ioctx *recv_ioctx,
@@ -1551,7 +1599,7 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 pr_err("req_lim = %d < 0\n", req_lim);
 srpt_handle_new_iu(ch, ioctx, NULL);
 } else {
-pr_info("receiving failed for ioctx %p with status %d\n",
+pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
 ioctx, wc->status);
 }
 }
@@ -1579,6 +1627,10 @@ static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
 }
 /**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
  * Note: Although this has not yet been observed during tests, at least in
  * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
@@ -1620,7 +1672,8 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
 }
 /**
- * srpt_create_ch_ib() - Create receive and send completion queues.
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
  */
 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 {
@@ -1628,7 +1681,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 struct srpt_port *sport = ch->sport;
 struct srpt_device *sdev = sport->sdev;
 const struct ib_device_attr *attrs = &sdev->device->attrs;
-u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+int sq_size = sport->port_attrib.srp_sq_size;
 int i, ret;
 WARN_ON(ch->rq_size < 1);
@@ -1639,12 +1692,12 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 goto out;
 retry:
-ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
 0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
 if (IS_ERR(ch->cq)) {
 ret = PTR_ERR(ch->cq);
 pr_err("failed to create CQ cqe= %d ret= %d\n",
-ch->rq_size + srp_sq_size, ret);
+ch->rq_size + sq_size, ret);
 goto out;
 }
@@ -1662,8 +1715,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
  * both both, as RDMA contexts will also post completions for the
  * RDMA READ case.
  */
-qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
-qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+qp_init->cap.max_rdma_ctxs = sq_size / 2;
 qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
 qp_init->port_num = ch->sport->port;
 if (sdev->use_srq) {
@@ -1677,8 +1730,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 if (IS_ERR(ch->qp)) {
 ret = PTR_ERR(ch->qp);
 if (ret == -ENOMEM) {
-srp_sq_size /= 2;
-if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+sq_size /= 2;
+if (sq_size >= MIN_SRPT_SQ_SIZE) {
 ib_destroy_cq(ch->cq);
 goto retry;
 }
@@ -1719,7 +1772,8 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
 }
 /**
- * srpt_close_ch() - Close an RDMA channel.
+ * srpt_close_ch - close a RDMA channel
+ * @ch: SRPT RDMA channel.
  *
  * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
@@ -1884,7 +1938,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
 ch->sport->sdev, ch->rq_size,
-ch->rsp_size, DMA_TO_DEVICE);
+ch->max_rsp_size, DMA_TO_DEVICE);
 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
 sdev, ch->rq_size,
@@ -1902,7 +1956,10 @@ static void srpt_release_channel_work(struct work_struct *w)
 }
 /**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @cm_id: IB/CM connection identifier.
+ * @param: IB/CM REQ parameters.
+ * @private_data: IB/CM REQ private data.
  *
  * Ownership of the cm_id is transferred to the target session if this
 * functions returns zero. Otherwise the caller remains the owner of cm_id.
@@ -1918,7 +1975,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 struct srp_login_rej *rej;
 struct ib_cm_rep_param *rep_param;
 struct srpt_rdma_ch *ch, *tmp_ch;
-__be16 *guid;
 u32 it_iu_len;
 int i, ret = 0;
@@ -1931,17 +1987,9 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 it_iu_len = be32_to_cpu(req->req_it_iu_len);
-pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
-" t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
-" (guid=0x%llx:0x%llx)\n",
-be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
-be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
-be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
-be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
-it_iu_len,
-param->port,
-be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
-be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6)\n",
+req->initiator_port_id, req->target_port_id, it_iu_len,
+param->port, &sport->gid);
 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 rej = kzalloc(sizeof(*rej), GFP_KERNEL);
@@ -2029,16 +2077,16 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
  * depth to avoid that the initiator driver has to report QUEUE_FULL
  * to the SCSI mid-layer.
  */
-ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
 spin_lock_init(&ch->spinlock);
 ch->state = CH_CONNECTING;
 INIT_LIST_HEAD(&ch->cmd_wait_list);
-ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
 ch->ioctx_ring = (struct srpt_send_ioctx **)
 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
 sizeof(*ch->ioctx_ring[0]),
-ch->rsp_size, DMA_TO_DEVICE);
+ch->max_rsp_size, DMA_TO_DEVICE);
 if (!ch->ioctx_ring)
 goto free_ch;
@@ -2078,10 +2126,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 goto destroy_ib;
 }
-guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
-snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
-be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
-be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+srpt_format_guid(ch->ini_guid, sizeof(ch->ini_guid),
+&param->primary_path->dgid.global.interface_id);
 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
 be64_to_cpu(*(__be64 *)ch->i_port_id),
 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
@@ -2166,7 +2212,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 free_ring:
 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
 ch->sport->sdev, ch->rq_size,
-ch->rsp_size, DMA_TO_DEVICE);
+ch->max_rsp_size, DMA_TO_DEVICE);
 free_ch:
 kfree(ch);
@@ -2207,7 +2253,8 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
} }
/** /**
* srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event. * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
* @ch: SRPT RDMA channel.
* *
* An IB_CM_RTU_RECEIVED message indicates that the connection is established * An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use). * and that the recipient may begin transmitting (RTU = ready to use).
...@@ -2230,7 +2277,9 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch) ...@@ -2230,7 +2277,9 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
} }
/** /**
* srpt_cm_handler() - IB connection manager callback function. * srpt_cm_handler - IB connection manager callback function
* @cm_id: IB/CM connection identifier.
* @event: IB/CM event.
* *
* A non-zero return value will cause the caller destroy the CM ID. * A non-zero return value will cause the caller destroy the CM ID.
* *
...@@ -2295,11 +2344,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd) ...@@ -2295,11 +2344,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx; struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA; return ioctx->state == SRPT_STATE_NEED_DATA;
} }
/* /*
* srpt_write_pending() - Start data transfer from initiator to target (write). * srpt_write_pending - Start data transfer from initiator to target (write).
*/ */
static int srpt_write_pending(struct se_cmd *se_cmd) static int srpt_write_pending(struct se_cmd *se_cmd)
{ {
...@@ -2356,7 +2405,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) ...@@ -2356,7 +2405,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
} }
/** /**
* srpt_queue_response() - Transmits the response to a SCSI command. * srpt_queue_response - transmit the response to a SCSI command
* @cmd: SCSI target command.
* *
* Callback function called by the TCM core. Must not block since it can be * Callback function called by the TCM core. Must not block since it can be
* invoked on the context of the IB completion handler. * invoked on the context of the IB completion handler.
...@@ -2370,13 +2420,11 @@ static void srpt_queue_response(struct se_cmd *cmd) ...@@ -2370,13 +2420,11 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr; struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge; struct ib_sge sge;
enum srpt_command_state state; enum srpt_command_state state;
unsigned long flags;
int resp_len, ret, i; int resp_len, ret, i;
u8 srp_tm_status; u8 srp_tm_status;
BUG_ON(!ch); BUG_ON(!ch);
spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state; state = ioctx->state;
switch (state) { switch (state) {
case SRPT_STATE_NEW: case SRPT_STATE_NEW:
...@@ -2391,7 +2439,6 @@ static void srpt_queue_response(struct se_cmd *cmd) ...@@ -2391,7 +2439,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
ch, ioctx->ioctx.index, ioctx->state); ch, ioctx->ioctx.index, ioctx->state);
break; break;
} }
spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return; return;
...@@ -2496,7 +2543,8 @@ static void srpt_refresh_port_work(struct work_struct *work) ...@@ -2496,7 +2543,8 @@ static void srpt_refresh_port_work(struct work_struct *work)
} }
/** /**
* srpt_release_sdev() - Free the channel resources associated with a target. * srpt_release_sdev - disable login and wait for associated channels
* @sdev: SRPT HCA pointer.
*/ */
static int srpt_release_sdev(struct srpt_device *sdev) static int srpt_release_sdev(struct srpt_device *sdev)
{ {
...@@ -2624,7 +2672,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq) ...@@ -2624,7 +2672,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
} }
/** /**
* srpt_add_one() - Infiniband device addition callback function. * srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/ */
static void srpt_add_one(struct ib_device *device) static void srpt_add_one(struct ib_device *device)
{ {
...@@ -2722,7 +2771,9 @@ static void srpt_add_one(struct ib_device *device) ...@@ -2722,7 +2771,9 @@ static void srpt_add_one(struct ib_device *device)
} }
/** /**
* srpt_remove_one() - InfiniBand device removal callback function. * srpt_remove_one - InfiniBand device removal callback function
* @device: Describes a HCA.
* @client_data: The value passed as the third argument to ib_set_client_data().
*/ */
static void srpt_remove_one(struct ib_device *device, void *client_data) static void srpt_remove_one(struct ib_device *device, void *client_data)
{ {
...@@ -2828,7 +2879,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) ...@@ -2828,7 +2879,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
} }
/** /**
* srpt_close_session() - Forcibly close a session. * srpt_close_session - forcibly close a session
* @se_sess: SCSI target session.
* *
* Callback function invoked by the TCM core to clean up sessions associated * Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes * with a node ACL when the user invokes
...@@ -2845,7 +2897,8 @@ static void srpt_close_session(struct se_session *se_sess) ...@@ -2845,7 +2897,8 @@ static void srpt_close_session(struct se_session *se_sess)
} }
/** /**
* srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB). * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
* @se_sess: SCSI target session.
* *
* A quote from RFC 4455 (SCSI-MIB) about this MIB object: * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
* This object represents an arbitrary integer used to uniquely identify a * This object represents an arbitrary integer used to uniquely identify a
...@@ -2867,7 +2920,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) ...@@ -2867,7 +2920,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx; struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
return srpt_get_cmd_state(ioctx); return ioctx->state;
} }
static int srpt_parse_guid(u64 *guid, const char *name) static int srpt_parse_guid(u64 *guid, const char *name)
...@@ -2884,7 +2937,7 @@ static int srpt_parse_guid(u64 *guid, const char *name) ...@@ -2884,7 +2937,7 @@ static int srpt_parse_guid(u64 *guid, const char *name)
} }
/** /**
* srpt_parse_i_port_id() - Parse an initiator port ID. * srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID. * @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID. * @i_port_id: Binary 128-bit port ID.
*/ */
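The kernel-doc above says srpt_parse_i_port_id() turns the ASCII representation of a 128-bit initiator port ID into its 16-byte binary form. A rough sketch of such a parser built around the kernel's hex2bin() helper follows; the optional "0x" prefix handling and the zero-padding of short input are assumptions about the accepted format, not a verbatim copy of the driver's routine:

/*
 * Sketch: parse a hex string (optional "0x" prefix, at most 32 hex
 * digits) into a 16-byte binary initiator port ID, left-padding short
 * input with zero bytes. Illustrative only; error handling may differ
 * from the real function.
 */
static int parse_i_port_id_sketch(u8 i_port_id[16], const char *name)
{
	const char *p = name;
	unsigned int len, count, pad;

	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	len = strlen(p);
	if (len == 0 || len % 2 || len > 32)
		return -EINVAL;
	count = len / 2;	/* bytes supplied by the caller */
	pad = 16 - count;	/* leading zero bytes to fill in */
	memset(i_port_id, 0, pad);
	return hex2bin(i_port_id + pad, p, count);
}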
...@@ -3135,8 +3188,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = { ...@@ -3135,8 +3188,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
}; };
/** /**
* configfs callback invoked for * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* mkdir /sys/kernel/config/target/$driver/$port/$tpg * @wwn: Corresponds to $driver/$port.
* @group: Not used.
* @name: $tpg.
*/ */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
struct config_group *group, struct config_group *group,
...@@ -3158,8 +3213,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, ...@@ -3158,8 +3213,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
} }
/** /**
* configfs callback invoked for * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
* rmdir /sys/kernel/config/target/$driver/$port/$tpg * @tpg: Target portal group to deregister.
*/ */
static void srpt_drop_tpg(struct se_portal_group *tpg) static void srpt_drop_tpg(struct se_portal_group *tpg)
{ {
...@@ -3170,8 +3225,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg) ...@@ -3170,8 +3225,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg)
} }
/** /**
* configfs callback invoked for * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
* mkdir /sys/kernel/config/target/$driver/$port * @tf: Not used.
* @group: Not used.
* @name: $port.
*/ */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group, struct config_group *group,
...@@ -3181,8 +3238,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, ...@@ -3181,8 +3238,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
} }
/** /**
* configfs callback invoked for * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
* rmdir /sys/kernel/config/target/$driver/$port * @wwn: $port.
*/ */
static void srpt_drop_tport(struct se_wwn *wwn) static void srpt_drop_tport(struct se_wwn *wwn)
{ {
...@@ -3240,7 +3297,7 @@ static const struct target_core_fabric_ops srpt_template = { ...@@ -3240,7 +3297,7 @@ static const struct target_core_fabric_ops srpt_template = {
}; };
/** /**
* srpt_init_module() - Kernel module initialization. * srpt_init_module - kernel module initialization
* *
* Note: Since ib_register_client() registers callback functions, and since at * Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core * least one of these callback functions (srpt_add_one()) calls target core
......
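The note above implies an ordering constraint for module initialization: srpt_add_one() can be invoked from within ib_register_client(), so everything it needs from the target core must already be registered at that point. A minimal sketch of that ordering, with the module-parameter validation and most error reporting of the real srpt_init_module() left out:

/*
 * Sketch of the init ordering discussed in the comment above: register
 * the fabric template before the IB client so that srpt_add_one(),
 * which may run during ib_register_client(), finds the target core
 * ready. Simplified; parameter checks omitted.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = target_register_template(&srpt_template);
	if (ret)
		return ret;

	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		target_unregister_template(&srpt_template);
		return ret;
	}

	return 0;
}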
...@@ -114,7 +114,7 @@ enum { ...@@ -114,7 +114,7 @@ enum {
MIN_SRPT_SQ_SIZE = 16, MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096, DEF_SRPT_SQ_SIZE = 4096,
SRPT_RQ_SIZE = 128, MAX_SRPT_RQ_SIZE = 128,
MIN_SRPT_SRQ_SIZE = 4, MIN_SRPT_SRQ_SIZE = 4,
DEFAULT_SRPT_SRQ_SIZE = 4095, DEFAULT_SRPT_SRQ_SIZE = 4095,
MAX_SRPT_SRQ_SIZE = 65535, MAX_SRPT_SRQ_SIZE = 65535,
...@@ -134,7 +134,7 @@ enum { ...@@ -134,7 +134,7 @@ enum {
}; };
/** /**
* enum srpt_command_state - SCSI command state managed by SRPT. * enum srpt_command_state - SCSI command state managed by SRPT
* @SRPT_STATE_NEW: New command arrived and is being processed. * @SRPT_STATE_NEW: New command arrived and is being processed.
* @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
* for data arrival. * for data arrival.
...@@ -158,7 +158,8 @@ enum srpt_command_state { ...@@ -158,7 +158,8 @@ enum srpt_command_state {
}; };
/** /**
* struct srpt_ioctx - Shared SRPT I/O context information. * struct srpt_ioctx - shared SRPT I/O context information
* @cqe: Completion queue element.
* @buf: Pointer to the buffer. * @buf: Pointer to the buffer.
* @dma: DMA address of the buffer. * @dma: DMA address of the buffer.
* @index: Index of the I/O context in its ioctx_ring array. * @index: Index of the I/O context in its ioctx_ring array.
...@@ -171,7 +172,7 @@ struct srpt_ioctx { ...@@ -171,7 +172,7 @@ struct srpt_ioctx {
}; };
/** /**
* struct srpt_recv_ioctx - SRPT receive I/O context. * struct srpt_recv_ioctx - SRPT receive I/O context
* @ioctx: See above. * @ioctx: See above.
* @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list. * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
*/ */
...@@ -187,13 +188,20 @@ struct srpt_rw_ctx { ...@@ -187,13 +188,20 @@ struct srpt_rw_ctx {
}; };
/** /**
* struct srpt_send_ioctx - SRPT send I/O context. * struct srpt_send_ioctx - SRPT send I/O context
* @ioctx: See above. * @ioctx: See above.
* @ch: Channel pointer. * @ch: Channel pointer.
* @spinlock: Protects 'state'. * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
* @rw_ctxs: RDMA read/write contexts.
* @rdma_cqe: RDMA completion queue element.
* @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state. * @state: I/O context state.
* @cmd: Target core command data structure. * @cmd: Target core command data structure.
* @sense_data: SCSI sense data. * @sense_data: SCSI sense data.
* @n_rdma: Number of work requests needed to transfer this ioctx.
* @n_rw_ctx: Size of rw_ctxs array.
* @queue_status_only: Send a SCSI status back to the initiator but no data.
* @sense_data: Sense data to be sent to the initiator.
*/ */
struct srpt_send_ioctx { struct srpt_send_ioctx {
struct srpt_ioctx ioctx; struct srpt_ioctx ioctx;
...@@ -204,10 +212,8 @@ struct srpt_send_ioctx { ...@@ -204,10 +212,8 @@ struct srpt_send_ioctx {
struct ib_cqe rdma_cqe; struct ib_cqe rdma_cqe;
struct list_head free_list; struct list_head free_list;
spinlock_t spinlock;
enum srpt_command_state state; enum srpt_command_state state;
struct se_cmd cmd; struct se_cmd cmd;
struct completion tx_done;
u8 n_rdma; u8 n_rdma;
u8 n_rw_ctx; u8 n_rw_ctx;
bool queue_status_only; bool queue_status_only;
...@@ -215,7 +221,7 @@ struct srpt_send_ioctx { ...@@ -215,7 +221,7 @@ struct srpt_send_ioctx {
}; };
/** /**
* enum rdma_ch_state - SRP channel state. * enum rdma_ch_state - SRP channel state
* @CH_CONNECTING: QP is in RTR state; waiting for RTU. * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
* @CH_LIVE: QP is in RTS state. * @CH_LIVE: QP is in RTS state.
* @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
...@@ -233,12 +239,14 @@ enum rdma_ch_state { ...@@ -233,12 +239,14 @@ enum rdma_ch_state {
}; };
/** /**
* struct srpt_rdma_ch - RDMA channel. * struct srpt_rdma_ch - RDMA channel
* @cm_id: IB CM ID associated with the channel. * @cm_id: IB CM ID associated with the channel.
* @qp: IB queue pair used for communicating over this channel. * @qp: IB queue pair used for communicating over this channel.
* @cq: IB completion queue for this channel. * @cq: IB completion queue for this channel.
* @zw_cqe: Zero-length write CQE.
* @kref: kref for this channel.
* @rq_size: IB receive queue size. * @rq_size: IB receive queue size.
* @rsp_size IB response message size in bytes. * @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue. * @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this * @sport: pointer to the information of the HCA port used by this
* channel. * channel.
...@@ -270,7 +278,7 @@ struct srpt_rdma_ch { ...@@ -270,7 +278,7 @@ struct srpt_rdma_ch {
struct ib_cqe zw_cqe; struct ib_cqe zw_cqe;
struct kref kref; struct kref kref;
int rq_size; int rq_size;
u32 rsp_size; u32 max_rsp_size;
atomic_t sq_wr_avail; atomic_t sq_wr_avail;
struct srpt_port *sport; struct srpt_port *sport;
u8 i_port_id[16]; u8 i_port_id[16];
...@@ -293,7 +301,7 @@ struct srpt_rdma_ch { ...@@ -293,7 +301,7 @@ struct srpt_rdma_ch {
}; };
/** /**
* struct srpt_port_attib - Attributes for SRPT port * struct srpt_port_attib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections. * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes. * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size. * @srp_sq_size: Shared receive queue (SRQ) size.
...@@ -307,7 +315,7 @@ struct srpt_port_attrib { ...@@ -307,7 +315,7 @@ struct srpt_port_attrib {
}; };
/** /**
* struct srpt_port - Information associated by SRPT with a single IB port. * struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information. * @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information. * @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled. * @enabled: Whether or not this target port is enabled.
...@@ -323,7 +331,7 @@ struct srpt_port_attrib { ...@@ -323,7 +331,7 @@ struct srpt_port_attrib {
* @port_guid_wwn: WWN associated with target port GUID. * @port_guid_wwn: WWN associated with target port GUID.
* @port_gid_tpg: TPG associated with target port GID. * @port_gid_tpg: TPG associated with target port GID.
* @port_gid_wwn: WWN associated with target port GID. * @port_gid_wwn: WWN associated with target port GID.
* @port_acl_list: Head of the list with all node ACLs for this port. * @port_attrib: Port attributes that can be accessed through configfs.
*/ */
struct srpt_port { struct srpt_port {
struct srpt_device *sdev; struct srpt_device *sdev;
...@@ -344,7 +352,7 @@ struct srpt_port { ...@@ -344,7 +352,7 @@ struct srpt_port {
}; };
/** /**
* struct srpt_device - Information associated by SRPT with a single HCA. * struct srpt_device - information associated by SRPT with a single HCA
* @device: Backpointer to the struct ib_device managed by the IB core. * @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain. * @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory. * @lkey: L_Key (local key) with write access to all local memory.
......