Commit 75318ec3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Fix information leak in marshalling code
  IB/pack: Remove some unused code added by the IBoE patches
  IB/mlx4: Fix IBoE link state
  IB/mlx4: Fix IBoE reported link rate
  mlx4_core: Workaround firmware bug in query dev cap
  IB/mlx4: Fix memory ordering of VLAN insertion control bits
  MAINTAINERS: Update NetEffect entry
parents 8cb280c9 7adce751
@@ -4064,9 +4064,8 @@ F: drivers/scsi/NCR_D700.*
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M: Faisal Latif <faisal.latif@intel.com>
-M: Chien Tung <chien.tin.tung@intel.com>
 L: linux-rdma@vger.kernel.org
-W: http://www.neteffect.com
+W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
 S: Supported
 F: drivers/infiniband/hw/nes/
...
@@ -277,36 +277,6 @@ void ib_ud_header_init(int payload_bytes,
 }
 EXPORT_SYMBOL(ib_ud_header_init);

-/**
- * ib_lrh_header_pack - Pack LRH header struct into wire format
- * @lrh:unpacked LRH header struct
- * @buf:Buffer to pack into
- *
- * ib_lrh_header_pack() packs the LRH header structure @lrh into
- * wire format in the buffer @buf.
- */
-int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
-{
-	ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
-	return 0;
-}
-EXPORT_SYMBOL(ib_lrh_header_pack);
-
-/**
- * ib_lrh_header_unpack - Unpack LRH structure from wire format
- * @lrh:unpacked LRH header struct
- * @buf:Buffer to pack into
- *
- * ib_lrh_header_unpack() unpacks the LRH header structure from
- * wire format (in buf) into @lrh.
- */
-int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
-{
-	ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
-	return 0;
-}
-EXPORT_SYMBOL(ib_lrh_header_unpack);
-
 /**
  * ib_ud_header_pack - Pack UD header struct into wire format
  * @header:UD header struct
...
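The two LRH helpers removed above were exported but had no in-tree callers; LRH packing still happens through the remaining UD header API. Below is a minimal sketch of that path. It assumes the post-IBoE ib_ud_header_init() argument order in this tree (payload, lrh_present, eth_present, vlan_present, grh_present, immediate_present), which may differ in other kernel versions, and the LID values are made up for illustration.

#include <rdma/ib_pack.h>

/* Sketch only: pack an LRH as part of a UD header with the surviving API. */
static int build_ud_header(void *buf, int payload_bytes)
{
	struct ib_ud_header header;

	ib_ud_header_init(payload_bytes, 1 /* lrh */, 0 /* eth */, 0 /* vlan */,
			  0 /* grh */, 0 /* immediate */, &header);

	header.lrh.service_level   = 0;
	header.lrh.destination_lid = cpu_to_be16(0x1234);	/* made-up DLID */
	header.lrh.source_lid      = cpu_to_be16(0x0001);	/* made-up SLID */

	return ib_ud_header_pack(&header, buf);	/* returns the packed length */
}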
@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
 	dst->grh.sgid_index    = src->grh.sgid_index;
 	dst->grh.hop_limit     = src->grh.hop_limit;
 	dst->grh.traffic_class = src->grh.traffic_class;
+	memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
 	dst->dlid              = src->dlid;
 	dst->sl                = src->sl;
 	dst->src_path_bits     = src->src_path_bits;
 	dst->static_rate       = src->static_rate;
 	dst->is_global         = src->ah_flags & IB_AH_GRH ? 1 : 0;
 	dst->port_num          = src->port_num;
+	dst->reserved          = 0;
 }
 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);

 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
			     struct ib_qp_attr *src)
 {
+	dst->qp_state       = src->qp_state;
 	dst->cur_qp_state   = src->cur_qp_state;
 	dst->path_mtu       = src->path_mtu;
 	dst->path_mig_state = src->path_mig_state;
@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 	dst->rnr_retry      = src->rnr_retry;
 	dst->alt_port_num   = src->alt_port_num;
 	dst->alt_timeout    = src->alt_timeout;
+	memset(dst->reserved, 0, sizeof(dst->reserved));
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
...
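The hunks above close the information leak named in the merge summary: these attr structures are copied back to user space, so any reserved field that is never written carries stale kernel memory out with it. The fix zeroes the reserved fields (and copies qp_state, which had been skipped). A generic illustration of the pattern, using a made-up struct rather than the uverbs ones:

#include <linux/string.h>
#include <linux/types.h>

struct example_resp {		/* hypothetical response struct copied to user space */
	u32 value;
	u8  flags;
	u8  reserved[3];	/* if left unwritten, leaks old kernel bytes */
};

static void fill_example_resp(struct example_resp *resp, u32 value, u8 flags)
{
	resp->value = value;
	resp->flags = flags;
	memset(resp->reserved, 0, sizeof(resp->reserved));	/* the fix: zero it explicitly */
}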
@@ -219,7 +219,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *ndev;
 	enum ib_mtu tmp;

-	props->active_width   = IB_WIDTH_4X;
+	props->active_width   = IB_WIDTH_1X;
 	props->active_speed   = 4;
 	props->port_cap_flags = IB_PORT_CM_SUP;
 	props->gid_tbl_len    = to_mdev(ibdev)->dev->caps.gid_table_len[port];
@@ -242,7 +242,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	tmp = iboe_get_mtu(ndev->mtu);
 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

-	props->state = netif_running(ndev) && netif_oper_up(ndev) ?
+	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
...
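Two of the listed fixes land here. The link-rate fix reports IB_WIDTH_1X instead of IB_WIDTH_4X, and the link-state fix keys the port state off netif_carrier_ok() instead of the operational state. A back-of-the-envelope check of the rate, under the assumption that active_speed 4 means the QDR multiplier (4 x the 2.5 Gb/s base lane rate):

/* Rough check only: IB base lane rate is 2.5 Gb/s, QDR multiplies it by 4. */
static unsigned int iboe_rate_gbps(unsigned int lanes)
{
	return lanes * 4 * 25 / 10;	/* 1X -> 10 Gb/s; the old 4X value reported 40 Gb/s */
}

With 1X the reported rate is 10 Gb/s, matching the 10 GigE port an IBoE device actually runs over, whereas 4X over-reported 40 Gb/s.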
@@ -1816,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

+		if (be16_to_cpu(vlan) < 0x1000) {
+			ctrl->ins_vlan = 1 << 6;
+			ctrl->vlan_tag = vlan;
+		}
+
 		/*
 		 * Make sure descriptor is fully written before
 		 * setting ownership bit (because HW can start
@@ -1831,11 +1836,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;

-		if (be16_to_cpu(vlan) < 0x1000) {
-			ctrl->ins_vlan = 1 << 6;
-			ctrl->vlan_tag = vlan;
-		}
-
 		stamp = ind + qp->sq_spare_wqes;
 		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
...
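The hunks above move the VLAN insertion control bits so they are written before the existing barrier and the owner_opcode store: once ownership flips, the hardware may fetch the WQE, and any field written afterwards can be missed. A stripped-down sketch of the ordering the fix restores, using a made-up descriptor struct rather than the real mlx4 control segment:

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/types.h>

struct fake_wqe_ctrl {		/* hypothetical stand-in for the real control segment */
	u8     ins_vlan;
	__be16 vlan_tag;
	__be32 owner_opcode;
};

static void hand_off_wqe(struct fake_wqe_ctrl *ctrl, __be16 vlan, __be32 owner_opcode)
{
	if (be16_to_cpu(vlan) < 0x1000) {	/* valid VLAN id: ask HW to insert it */
		ctrl->ins_vlan = 1 << 6;
		ctrl->vlan_tag = vlan;
	}

	wmb();					/* all payload stores visible before ownership */

	ctrl->owner_opcode = owner_opcode;	/* HW may start reading the WQE now */
}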
@@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 	dev_cap->bf_reg_size = 1 << (field & 0x1f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+	if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
+		mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		field = 3;
+	}
 	dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 	mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
		 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
...
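The added check refuses an impossible BlueFlame register count instead of trusting the firmware value. For a sense of the bound (assuming a 4 KiB PAGE_SIZE and, say, a 512-byte bf_reg_size): 4096 / 512 = 8 registers fit in one page, so any reported log2 count above 3 (1 << 3 == 8) cannot be valid and is forced down to 3.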