Commit 56040f07 authored by Roland Dreier

Merge branches 'cma', 'ipoib', 'iser', 'mlx4' and 'nes' into for-next

@@ -79,11 +79,6 @@ int disable_mpa_crc = 0;
 module_param(disable_mpa_crc, int, 0644);
 MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
 
-unsigned int send_first = 0;
-module_param(send_first, int, 0644);
-MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
-
 unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
 module_param(nes_drv_opt, int, 0644);
 MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
...
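The hunk above drops the send_first knob, which was apparently never read anywhere in the driver. For reference, a minimal sketch of how such a tunable is wired up in a module (demo_opt and the demo_* names are hypothetical, not part of this commit):

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical tunable, analogous to nes_drv_opt above; readable and
 * writable at runtime via /sys/module/<module>/parameters/demo_opt. */
static unsigned int demo_opt;
module_param(demo_opt, uint, 0644);
MODULE_PARM_DESC(demo_opt, "Demo driver option bitmask");

static int __init demo_init(void)
{
	pr_info("demo: opt=0x%x\n", demo_opt);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

A parameter like this is user-visible ABI, which is why dead ones such as send_first are worth pruning.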
@@ -57,7 +57,7 @@
 #define QUEUE_DISCONNECTS
 
 #define DRV_NAME    "iw_nes"
-#define DRV_VERSION "1.5.0.0"
+#define DRV_VERSION "1.5.0.1"
 #define PFX         DRV_NAME ": "
 
 /*
@@ -172,7 +172,6 @@ extern int interrupt_mod_interval;
 extern int nes_if_count;
 extern int mpa_version;
 extern int disable_mpa_crc;
-extern unsigned int send_first;
 extern unsigned int nes_drv_opt;
 extern unsigned int nes_debug_level;
 extern unsigned int wqm_quanta;
...
@@ -3006,6 +3006,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				switch (nesqp->hw_iwarp_state) {
 				case NES_AEQE_IWARP_STATE_CLOSING:
 					next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+					break;
 				case NES_AEQE_IWARP_STATE_TERMINATE:
 					next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
 					break;
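The one-line fix above closes a classic switch fall-through: without the break, a QP seen in the CLOSING state fell into the TERMINATE case and requested the wrong next state. A minimal userspace sketch of the bug class (all names are stand-ins, not the driver's):

```c
#include <stdio.h>

enum hw_state { ST_CLOSING, ST_TERMINATE };

/* Stand-in for the nes_modify_qp() switch: each case must pick
 * exactly one next state. */
static int next_state(enum hw_state hw)
{
	int next = -1;

	switch (hw) {
	case ST_CLOSING:
		next = 100;	/* stand-in for ..._STATE_CLOSING */
		break;		/* this is the missing break: without it
				 * execution falls through and returns 200 */
	case ST_TERMINATE:
		next = 200;	/* stand-in for ..._STATE_TERMINATE */
		break;
	}
	return next;
}

int main(void)
{
	printf("CLOSING -> %d (expect 100)\n", next_state(ST_CLOSING));
	return 0;
}
```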
@@ -3068,19 +3069,10 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 
 		nesqp->ibqp_state = attr->qp_state;
-		if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
-				(u32)NES_CQP_QP_IWARP_STATE_RTS) &&
-				((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
-				(u32)NES_CQP_QP_IWARP_STATE_RTS)) {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		} else {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		}
+		nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+		nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+				nesqp->iwarp_state);
 	}
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) {
...
@@ -535,14 +535,14 @@ void ipoib_drain_cq(struct net_device *dev);
 void ipoib_set_ethtool_ops(struct net_device *dev);
 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
 #define IPOIB_FLAGS_RC		0x80
 #define IPOIB_FLAGS_UC		0x40
 
 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)	(ha[0] & (IPOIB_FLAGS_RC))
 
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
 extern int ipoib_max_conn_qp;
 
 static inline int ipoib_cm_admin_enabled(struct net_device *dev)
...
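The move above takes IPOIB_FLAGS_RC/UC and IPOIB_CM_SUPPORTED() out of the CONFIG_INFINIBAND_IPOIB_CM guard, evidently so that code built without connected mode (such as the ipoib_set_mode() relocated to ipoib_main.c below) can still test the hardware address. A compressed sketch of this header layout (CONFIG_FEATURE_X and the feature_* names are hypothetical):

```c
/* Flag macros stay visible to everyone: callers may need to *test*
 * for a capability even when support for it is compiled out. */
#define FEATURE_RC		0x80
#define FEATURE_SUPPORTED(ha)	((ha)[0] & FEATURE_RC)

#ifdef CONFIG_FEATURE_X
/* Real implementation, present only when the feature is built in. */
int feature_enable(unsigned char *ha);
#else
/* Stub keeps callers compiling when the feature is configured out. */
static inline int feature_enable(unsigned char *ha)
{
	(void)ha;
	return -1;	/* "not supported" style failure */
}
#endif
```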
@@ -1448,37 +1448,6 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 	return sprintf(buf, "datagram\n");
 }
 
-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* flush paths if we switch modes so that connections are restarted */
-	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
-		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		ipoib_warn(priv, "enabling connected mode "
-			   "will cause multicast packet drops\n");
-		netdev_update_features(dev);
-		rtnl_unlock();
-		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	if (!strcmp(buf, "datagram\n")) {
-		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		netdev_update_features(dev);
-		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
-		rtnl_unlock();
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
...
@@ -215,6 +215,37 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	/* flush paths if we switch modes so that connections are restarted */
+	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		ipoib_warn(priv, "enabling connected mode "
+			   "will cause multicast packet drops\n");
+		netdev_update_features(dev);
+		rtnl_unlock();
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	if (!strcmp(buf, "datagram\n")) {
+		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		netdev_update_features(dev);
+		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+		rtnl_unlock();
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
...
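Note the rtnl_unlock()/rtnl_lock() dance around ipoib_flush_paths(): the function enters with RTNL held (the sysfs handler takes it) and must leave with it held, but the flush can block for a long time, so the lock is dropped only around that call. A minimal userspace analog of the drop-relock pattern, using pthreads (all names hypothetical, error handling elided):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ipoib_flush_paths(): slow work that must not run
 * under cfg_lock, or other cfg_lock users would stall behind it. */
static void slow_flush(void)
{
	puts("flushing...");
}

/* Called with cfg_lock held (like the sysfs handler holding RTNL);
 * must return with it held, so it is dropped only around the
 * blocking call. */
static void set_mode_locked(void)
{
	puts("state flipped under lock");
	pthread_mutex_unlock(&cfg_lock);
	slow_flush();
	pthread_mutex_lock(&cfg_lock);
}

int main(void)
{
	pthread_mutex_lock(&cfg_lock);
	set_mode_locked();
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}
```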
@@ -177,6 +177,7 @@ struct iser_data_buf {
 
 /* fwd declarations */
 struct iser_device;
+struct iser_cq_desc;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
@@ -226,16 +227,21 @@ struct iser_rx_desc {
 	char		             pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));
 
+#define ISER_MAX_CQ 4
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd	             *pd;
-	struct ib_cq	             *rx_cq;
-	struct ib_cq	             *tx_cq;
+	struct ib_cq	             *rx_cq[ISER_MAX_CQ];
+	struct ib_cq	             *tx_cq[ISER_MAX_CQ];
 	struct ib_mr	             *mr;
-	struct tasklet_struct	     cq_tasklet;
+	struct tasklet_struct	     cq_tasklet[ISER_MAX_CQ];
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list; /* entry in ig devices list */
 	int                          refcount;
+	int                          cq_active_qps[ISER_MAX_CQ];
+	int                          cqs_used;
+	struct iser_cq_desc	     *cq_desc;
 };
 
 struct iser_conn {
@@ -287,6 +293,11 @@ struct iser_page_vec {
 	int data_size;
 };
 
+struct iser_cq_desc {
+	struct iser_device           *device;
+	int                          cq_index;
+};
+
 struct iser_global {
 	struct mutex      device_list_mutex;/*                   */
 	struct list_head  device_list;	     /* all iSER devices */
...
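struct iser_cq_desc exists because the CQ callback and the tasklet each receive a single opaque pointer; once a device owns several CQs, that pointer must encode which CQ fired, not just the device. A small userspace sketch of the pattern (names hypothetical):

```c
#include <stdio.h>

#define MAX_QUEUES 4

struct device;

/* One small descriptor per queue: the opaque callback argument
 * carries both the owner and the queue index. */
struct queue_desc {
	struct device *dev;
	int            index;
};

struct device {
	int               work_done[MAX_QUEUES];
	struct queue_desc desc[MAX_QUEUES];
};

/* The callback gets a single void *, like ib_create_cq()'s cq_context. */
static void on_event(void *ctx)
{
	struct queue_desc *d = ctx;

	d->dev->work_done[d->index]++;
	printf("event on queue %d\n", d->index);
}

int main(void)
{
	struct device dev = {0};

	for (int i = 0; i < MAX_QUEUES; i++) {
		dev.desc[i].dev = &dev;
		dev.desc[i].index = i;
	}
	on_event(&dev.desc[2]);	/* simulate an "interrupt" on queue 2 */
	return 0;
}
```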
@@ -70,32 +70,50 @@ static void iser_event_handler(struct ib_event_handler *handler,
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
+	int i, j;
+	struct iser_cq_desc *cq_desc;
+
+	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+		 device->ib_device->name, device->ib_device->num_comp_vectors);
+
+	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+				  GFP_KERNEL);
+	if (device->cq_desc == NULL)
+		goto cq_desc_err;
+	cq_desc = device->cq_desc;
+
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->rx_cq = ib_create_cq(device->ib_device,
-				  iser_cq_callback,
-				  iser_cq_event_callback,
-				  (void *)device,
-				  ISER_MAX_RX_CQ_LEN, 0);
-	if (IS_ERR(device->rx_cq))
-		goto rx_cq_err;
-
-	device->tx_cq = ib_create_cq(device->ib_device,
-				  NULL, iser_cq_event_callback,
-				  (void *)device,
-				  ISER_MAX_TX_CQ_LEN, 0);
-	if (IS_ERR(device->tx_cq))
-		goto tx_cq_err;
-
-	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
-		goto cq_arm_err;
-
-	tasklet_init(&device->cq_tasklet,
-		     iser_cq_tasklet_fn,
-		     (unsigned long)device);
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device   = device;
+		cq_desc[i].cq_index = i;
+
+		device->rx_cq[i] = ib_create_cq(device->ib_device,
+					  iser_cq_callback,
+					  iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->rx_cq[i]))
+			goto cq_err;
+
+		device->tx_cq[i] = ib_create_cq(device->ib_device,
+					  NULL, iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_TX_CQ_LEN, i);
+		if (IS_ERR(device->tx_cq[i]))
+			goto cq_err;
+
+		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+			goto cq_err;
+
+		tasklet_init(&device->cq_tasklet[i],
+			     iser_cq_tasklet_fn,
+			     (unsigned long)&cq_desc[i]);
+	}
 
 	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
 				   IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +131,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
-	ib_destroy_cq(device->tx_cq);
-tx_cq_err:
-	ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+	for (j = 0; j < device->cqs_used; j++)
+		tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+	for (j = 0; j < i; j++) {
+		if (device->tx_cq[j])
+			ib_destroy_cq(device->tx_cq[j]);
+		if (device->rx_cq[j])
+			ib_destroy_cq(device->rx_cq[j]);
+	}
 	ib_dealloc_pd(device->pd);
 pd_err:
+	kfree(device->cq_desc);
+cq_desc_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
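The reworked error path folds the old per-object labels into a single cq_err label plus a loop bounded by i, the number of CQ pairs attempted so far, with NULL checks covering a partially built pair. A reduced userspace sketch of this goto-unwind idiom (names hypothetical):

```c
#include <stdio.h>
#include <stdlib.h>

#define NRES 4

/* Stand-ins for ib_create_cq()/ib_destroy_cq(). */
static void *res_create(int i) { (void)i; return malloc(64); }
static void res_destroy(void *r) { free(r); }

static int setup(void **res)
{
	int i, j;

	for (i = 0; i < NRES; i++) {
		res[i] = res_create(i);
		if (res[i] == NULL)
			goto err;
	}
	return 0;
err:
	/* unwind only the indices that were actually created */
	for (j = 0; j < i; j++)
		res_destroy(res[j]);
	return -1;
}

int main(void)
{
	void *res[NRES];

	if (setup(res) == 0) {
		printf("all %d resources created\n", NRES);
		for (int i = 0; i < NRES; i++)
			res_destroy(res[i]);
	}
	return 0;
}
```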
@@ -131,18 +154,24 @@ static int iser_create_device_ib_res(struct iser_device *device)
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
+	int i;
 	BUG_ON(device->mr == NULL);
 
-	tasklet_kill(&device->cq_tasklet);
+	for (i = 0; i < device->cqs_used; i++) {
+		tasklet_kill(&device->cq_tasklet[i]);
+		(void)ib_destroy_cq(device->tx_cq[i]);
+		(void)ib_destroy_cq(device->rx_cq[i]);
+		device->tx_cq[i] = NULL;
+		device->rx_cq[i] = NULL;
+	}
+
 	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->tx_cq);
-	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
+	kfree(device->cq_desc);
 
 	device->mr = NULL;
-	device->tx_cq = NULL;
-	device->rx_cq = NULL;
 	device->pd = NULL;
 }
@@ -157,6 +186,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	struct ib_qp_init_attr	init_attr;
 	int			req_err, resp_err, ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
+	int index, min_index = 0;
 
 	BUG_ON(ib_conn->device == NULL);
@@ -220,10 +250,20 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
 	memset(&init_attr, 0, sizeof init_attr);
 
+	mutex_lock(&ig.connlist_mutex);
+	/* select the CQ with the minimal number of usages */
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
-	init_attr.send_cq	= device->tx_cq;
-	init_attr.recv_cq	= device->rx_cq;
+	init_attr.send_cq	= device->tx_cq[min_index];
+	init_attr.recv_cq	= device->rx_cq[min_index];
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
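QP placement above is a simple least-loaded scan: under ig.connlist_mutex, pick the CQ with the fewest active QPs, bump its counter, and use it for both send and receive completions. The same O(n) selection in miniature (hypothetical names):

```c
#include <stdio.h>

#define NCQ 4

/* Pick the queue currently serving the fewest connections and
 * account for the new user. In the driver this runs under a mutex
 * so the read and increment are atomic with respect to peers. */
static int pick_least_loaded(int *active, int n)
{
	int i, min_i = 0;

	for (i = 1; i < n; i++)
		if (active[i] < active[min_i])
			min_i = i;
	active[min_i]++;
	return min_i;
}

int main(void)
{
	int active[NCQ] = {3, 1, 2, 1};

	printf("chose cq %d\n", pick_least_loaded(active, NCQ)); /* 1 */
	printf("chose cq %d\n", pick_least_loaded(active, NCQ)); /* 3 */
	return 0;
}
```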
@@ -252,6 +292,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
+	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
 	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
@@ -262,9 +303,12 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	if (ib_conn->fmr_pool != NULL)
 		ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
-	if (ib_conn->qp != NULL)
+	if (ib_conn->qp != NULL) {
+		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+		ib_conn->device->cq_active_qps[cq_index]--;
 		rdma_destroy_qp(ib_conn->cma_id);
+	}
 
 	/* if cma handler context, the caller acts s.t the cma destroy the id */
 	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);
@@ -791,9 +835,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 	}
 }
 
-static int iser_drain_tx_cq(struct iser_device *device)
+static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 {
-	struct ib_cq  *cq = device->tx_cq;
+	struct ib_cq  *cq = device->tx_cq[cq_index];
 	struct ib_wc  wc;
 	struct iser_tx_desc *tx_desc;
 	struct iser_conn *ib_conn;
@@ -822,8 +866,10 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-	struct iser_device  *device = (struct iser_device *)data;
-	struct ib_cq	     *cq = device->rx_cq;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq	     *cq = device->rx_cq[cq_index];
 	struct ib_wc	     wc;
 	struct iser_rx_desc *desc;
 	unsigned long	     xfer_len;
@@ -851,19 +897,21 @@ static void iser_cq_tasklet_fn(unsigned long data)
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
-			completed_tx += iser_drain_tx_cq(device);
+			completed_tx += iser_drain_tx_cq(device, cq_index);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed"                       */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
-	completed_tx += iser_drain_tx_cq(device);
+	completed_tx += iser_drain_tx_cq(device, cq_index);
 	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-	struct iser_device  *device = (struct iser_device *)cq_context;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
 
-	tasklet_schedule(&device->cq_tasklet);
+	tasklet_schedule(&device->cq_tasklet[cq_index]);
 }
@@ -3094,6 +3094,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		if (validate_eth_header_mac(slave, rule_header, rlist))
 			return -EINVAL;
 		break;
+	case MLX4_NET_TRANS_RULE_ID_IB:
+		break;
 	case MLX4_NET_TRANS_RULE_ID_IPV4:
 	case MLX4_NET_TRANS_RULE_ID_TCP:
 	case MLX4_NET_TRANS_RULE_ID_UDP:
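The added case lets a slave attach IB-typed flow-steering rules without the Ethernet MAC validation that L2 rules receive; rule ids not listed in the switch presumably still fall to an error path. A compressed userspace sketch of this validate-by-type whitelist (names are stand-ins):

```c
#include <stdio.h>

enum rule_id { RULE_ETH, RULE_IB, RULE_IPV4, RULE_BAD };

static int validate_mac(void) { return 0; }	/* stand-in header check */

/* Whitelist by rule type: each id gets exactly the validation it
 * needs; anything unrecognized is rejected. */
static int validate_rule(enum rule_id id)
{
	switch (id) {
	case RULE_ETH:
		if (validate_mac())
			return -1;
		break;
	case RULE_IB:		/* allowed, no extra header checks */
		break;
	case RULE_IPV4:
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("IB rule: %s\n", validate_rule(RULE_IB) ? "rejected" : "ok");
	printf("bad rule: %s\n", validate_rule(RULE_BAD) ? "rejected" : "ok");
	return 0;
}
```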