Commit 2b17992f authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "This is a bit later than usual for our first -rc but I'm not seeing
  anything worry-some in the RDMA tree right now. Quiet so far this -rc
  cycle, only a few internal driver related bugs and a small series
  fixing ODP bugs found by more advanced testing.

  A set of small driver and core code fixes:

   - Small series fixing long-standing, user-triggerable bugs in the ODP
     processing inside mlx5 and the core code

   - Various small driver malfunctions and crashes (use-after-free,
     error-unwind, and implementation bugs)

   - A malfunction of the RDMA GID cache that can be triggered by the
     administrator"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx5: Initialize return variable in case pagefault was skipped
  IB/mlx5: Fix page fault handling for MW
  IB/umem: Set correct address to the invalidation function
  IB/mlx5: Skip non-ODP MR when handling a page fault
  RDMA/hns: Bugfix pbl configuration for rereg mr
  iser: set sector for ambiguous mr status errors
  RDMA/rdmavt: Fix rvt_create_ah function signature
  IB/mlx5: Avoid load failure due to unknown link width
  IB/mlx5: Fix XRC QP support after introducing extended atomic
  RDMA/bnxt_re: Avoid accessing the device structure after it is freed
  RDMA/bnxt_re: Fix system hang when registration with L2 driver fails
  RDMA/core: Add GIDs while changing MAC addr only for registered ndev
  RDMA/mlx5: Fix fence type for IB_WR_LOCAL_INV WR
  net/mlx5: Fix XRC SRQ umem valid bits
parents 94f371cb 7bca603a
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGEADDR:
 		cmds[0] = netdev_del_cmd;
-		cmds[1] = add_default_gid_cmd;
-		cmds[2] = add_cmd;
+		if (ndev->reg_state == NETREG_REGISTERED) {
+			cmds[1] = add_default_gid_cmd;
+			cmds[2] = add_cmd;
+		}
 		break;
 	case NETDEV_CHANGEUPPER:
@@ -137,15 +137,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
 	up_read(&per_mm->umem_rwsem);
 }
 
-static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
-				      u64 end, void *cookie)
-{
-	ib_umem_notifier_start_account(item);
-	item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
-	ib_umem_notifier_end_account(item);
-	return 0;
-}
-
 static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
 					     u64 start, u64 end, void *cookie)
 {
@@ -553,12 +544,13 @@ static int ib_umem_odp_map_dma_single_page(
 		put_page(page);
 
 	if (remove_existing_mapping && umem->context->invalidate_range) {
-		invalidate_page_trampoline(
+		ib_umem_notifier_start_account(umem_odp);
+		umem->context->invalidate_range(
 			umem_odp,
-			ib_umem_start(umem) + (page_index >> umem->page_shift),
-			ib_umem_start(umem) + ((page_index + 1) >>
-					       umem->page_shift),
-			NULL);
+			ib_umem_start(umem) + (page_index << umem->page_shift),
+			ib_umem_start(umem) +
+				((page_index + 1) << umem->page_shift));
+		ib_umem_notifier_end_account(umem_odp);
 		ret = -EAGAIN;
 	}
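A note on the address arithmetic the second hunk corrects: page_index counts whole pages inside the umem, so turning it into a byte offset requires a left shift by the page shift. The old right shift collapsed the offset to zero for small indices, so the notifier invalidated the wrong single-page range. A standalone sketch with illustrative values (plain C, not the kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values; the kernel derives these from the umem. */
		unsigned long umem_start = 0x7f0000000000UL;	/* ib_umem_start() */
		unsigned int page_shift = 12;			/* 4 KiB pages */
		unsigned long page_index = 3;

		/* Buggy form: shifting the index right collapses it to 0. */
		unsigned long bad = umem_start + (page_index >> page_shift);
		/* Fixed form: index << page_shift is the byte offset of the page. */
		unsigned long good = umem_start + (page_index << page_shift);

		printf("bad  = 0x%lx\n", bad);	/* 0x7f0000000000 */
		printf("good = 0x%lx\n", good);	/* 0x7f0000003000 */
		return 0;
	}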
@@ -1268,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	/* Registered a new RoCE device instance to netdev */
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
+		rtnl_unlock();
 		pr_err("Failed to register with netedev: %#x\n", rc);
 		return -EINVAL;
 	}
@@ -1466,6 +1467,7 @@ static void bnxt_re_task(struct work_struct *work)
 				"Failed to register with IB: %#x", rc);
 			bnxt_re_remove_one(rdev);
 			bnxt_re_dev_unreg(rdev);
+			goto exit;
 		}
 		break;
 	case NETDEV_UP:
@@ -1489,6 +1491,7 @@ static void bnxt_re_task(struct work_struct *work)
 	}
 	smp_mb__before_atomic();
 	atomic_dec(&rdev->sched_count);
+exit:
 	kfree(re_work);
 }
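Both bnxt_re hunks apply the same error-unwind discipline: release any lock taken on the path before an early return, and once a failure path has torn down and freed an object, jump past every remaining statement that touches it. A minimal userspace sketch of the pattern, with hypothetical names (dev_setup, dev_task) rather than the driver's:

	#include <stdlib.h>
	#include <pthread.h>

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

	struct dev {
		int sched_count;
	};

	/* Simulated backend registration that fails. */
	static int register_backend(struct dev *d)
	{
		(void)d;
		return -1;
	}

	static int dev_setup(struct dev *d)
	{
		int rc;

		pthread_mutex_lock(&reg_lock);
		rc = register_backend(d);
		if (rc) {
			pthread_mutex_unlock(&reg_lock);	/* a missing unlock here hangs later takers */
			return -1;
		}
		pthread_mutex_unlock(&reg_lock);
		return 0;
	}

	static void dev_task(struct dev *d)
	{
		if (dev_setup(d)) {
			free(d);	/* teardown frees the object... */
			goto done;	/* ...so skip anything that still uses it */
		}
		d->sched_count--;	/* only reached while d is alive */
	done:
		;	/* common cleanup that does not touch d */
	}

	int main(void)
	{
		struct dev *d = calloc(1, sizeof(*d));

		if (d)
			dev_task(d);
		return 0;
	}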
@@ -1756,10 +1756,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
 	return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
-				  unsigned long mtpt_idx)
+static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+			struct hns_roce_mr *mr)
 {
-	struct hns_roce_v2_mpt_entry *mpt_entry;
 	struct scatterlist *sg;
 	u64 page_addr;
 	u64 *pages;
@@ -1767,6 +1766,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 	int len;
 	int entry;
 
+	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+	roce_set_field(mpt_entry->byte_48_mode_ba,
+		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+		       upper_32_bits(mr->pbl_ba >> 3));
+
+	pages = (u64 *)__get_free_page(GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (j = 0; j < len; ++j) {
+			page_addr = sg_dma_address(sg) +
+				    (j << mr->umem->page_shift);
+			pages[i] = page_addr >> 6;
+
+			/* Record the first 2 entry directly to MTPT table */
+			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+				goto found;
+			i++;
+		}
+	}
+
+found:
+	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
+
+	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+	roce_set_field(mpt_entry->byte_64_buf_pa1,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+	free_page((unsigned long)pages);
+
+	return 0;
+}
+
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+				  unsigned long mtpt_idx)
+{
+	struct hns_roce_v2_mpt_entry *mpt_entry;
+	int ret;
+
 	mpt_entry = mb_buf;
 	memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1781,7 +1827,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
 		       V2_MPT_BYTE_4_PD_S, mr->pd);
-	mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
 
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1796,13 +1841,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
 		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
-	mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
 
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
 		     mr->type == MR_TYPE_MR ? 0 : 1);
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
 		     1);
-	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
 	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1813,53 +1856,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 	if (mr->type == MR_TYPE_DMA)
 		return 0;
 
-	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
-		       V2_MPT_BYTE_48_PBL_BA_H_S,
-		       upper_32_bits(mr->pbl_ba >> 3));
-	mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
-	pages = (u64 *)__get_free_page(GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	i = 0;
-	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
-		for (j = 0; j < len; ++j) {
-			page_addr = sg_dma_address(sg) +
-				    (j << mr->umem->page_shift);
-			pages[i] = page_addr >> 6;
-
-			/* Record the first 2 entry directly to MTPT table */
-			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-				goto found;
-			i++;
-		}
-	}
-
-found:
-	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
-	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
-		       V2_MPT_BYTE_56_PA0_H_S,
-		       upper_32_bits(pages[0]));
-	mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
-
-	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
-	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
-		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
-
-	free_page((unsigned long)pages);
-
-	roce_set_field(mpt_entry->byte_64_buf_pa1,
-		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
-		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
-		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-	mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
-
-	return 0;
+	ret = set_mtpt_pbl(mpt_entry, mr);
+
+	return ret;
 }
 
 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1868,6 +1867,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 			     u64 size, void *mb_buf)
 {
 	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+	int ret = 0;
 
 	if (flags & IB_MR_REREG_PD) {
 		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1880,14 +1880,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 			     V2_MPT_BYTE_8_BIND_EN_S,
 			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
 			     V2_MPT_BYTE_8_ATOMIC_EN_S,
-			     (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
-			     (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
-			     (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
-			     (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
 	}
 
 	if (flags & IB_MR_REREG_TRANS) {
@@ -1896,21 +1896,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
 		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
 
-		mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-		mpt_entry->pbl_ba_l =
-			cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-		roce_set_field(mpt_entry->byte_48_mode_ba,
-			       V2_MPT_BYTE_48_PBL_BA_H_M,
-			       V2_MPT_BYTE_48_PBL_BA_H_S,
-			       upper_32_bits(mr->pbl_ba >> 3));
-		mpt_entry->byte_48_mode_ba =
-			cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
 		mr->iova = iova;
 		mr->size = size;
+
+		ret = set_mtpt_pbl(mpt_entry, mr);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
@@ -1094,31 +1094,26 @@ enum mlx5_ib_width {
 	MLX5_IB_WIDTH_12X	= 1 << 4
 };
 
-static int translate_active_width(struct ib_device *ibdev, u8 active_width,
-				  u8 *ib_width)
+static void translate_active_width(struct ib_device *ibdev, u8 active_width,
+				   u8 *ib_width)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	int err = 0;
 
-	if (active_width & MLX5_IB_WIDTH_1X) {
+	if (active_width & MLX5_IB_WIDTH_1X)
 		*ib_width = IB_WIDTH_1X;
-	} else if (active_width & MLX5_IB_WIDTH_2X) {
-		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
-			    (int)active_width);
-		err = -EINVAL;
-	} else if (active_width & MLX5_IB_WIDTH_4X) {
+	else if (active_width & MLX5_IB_WIDTH_4X)
 		*ib_width = IB_WIDTH_4X;
-	} else if (active_width & MLX5_IB_WIDTH_8X) {
+	else if (active_width & MLX5_IB_WIDTH_8X)
 		*ib_width = IB_WIDTH_8X;
-	} else if (active_width & MLX5_IB_WIDTH_12X) {
+	else if (active_width & MLX5_IB_WIDTH_12X)
 		*ib_width = IB_WIDTH_12X;
-	} else {
-		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+	else {
+		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
 			    (int)active_width);
-		err = -EINVAL;
+		*ib_width = IB_WIDTH_4X;
 	}
 
-	return err;
+	return;
 }
 
 static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -1225,10 +1220,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 	if (err)
 		goto out;
 
-	err = translate_active_width(ibdev, ib_link_width_oper,
-				     &props->active_width);
-	if (err)
-		goto out;
+	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
+
 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
 	if (err)
 		goto out;
@@ -674,6 +674,15 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 		goto srcu_unlock;
 	}
 
+	if (!mr->umem->is_odp) {
+		mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+			    key);
+		if (bytes_mapped)
+			*bytes_mapped += bcnt;
+		ret = 0;
+		goto srcu_unlock;
+	}
+
 	ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
 	if (ret < 0)
 		goto srcu_unlock;
@@ -735,6 +744,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 			head = frame;
 
 			bcnt -= frame->bcnt;
+			offset = 0;
 		}
 		break;
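The one-line offset = 0 addition follows a common pattern when walking a chain of segments: a caller-supplied starting offset is meaningful only inside the first segment and must be reset before consuming the ones queued after it. A generic sketch of that pattern, with illustrative types rather than the mlx5 data structures:

	#include <stdio.h>

	struct frame {
		size_t len;
	};

	/* Consume a list of fragments where 'offset' is valid only within
	 * the first one; forgetting to reset it skips valid bytes later. */
	static size_t consume(const struct frame *frames, int n, size_t offset)
	{
		size_t total = 0;

		for (int i = 0; i < n; i++) {
			total += frames[i].len - offset;
			offset = 0;	/* the fix: later frames start at 0 */
		}
		return total;
	}

	int main(void)
	{
		struct frame f[] = { { 100 }, { 100 }, { 100 } };

		printf("%zu\n", consume(f, 3, 40));	/* 260, not 180 */
		return 0;
	}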
@@ -2633,8 +2633,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
 	if (access_flags & IB_ACCESS_REMOTE_READ)
 		*hw_access_flags |= MLX5_QP_BIT_RRE;
-	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-	    qp->ibqp.qp_type == IB_QPT_RC) {
+	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
 		int atomic_mode;
 
 		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
@@ -4678,17 +4677,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (wr->opcode == IB_WR_LOCAL_INV ||
-		    wr->opcode == IB_WR_REG_MR) {
+		if (wr->opcode == IB_WR_REG_MR) {
 			fence = dev->umr_fence;
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-		} else if (wr->send_flags & IB_SEND_FENCE) {
-			if (qp->next_fence)
-				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
-			else
-				fence = MLX5_FENCE_MODE_FENCE;
-		} else {
-			fence = qp->next_fence;
+		} else {
+			if (wr->send_flags & IB_SEND_FENCE) {
+				if (qp->next_fence)
+					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+				else
+					fence = MLX5_FENCE_MODE_FENCE;
+			} else {
+				fence = qp->next_fence;
+			}
 		}
 
 		switch (ibqp->qp_type) {
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
  * rvt_create_ah - create an address handle
  * @pd: the protection domain
  * @ah_attr: the attributes of the AH
+ * @udata: pointer to user's input output buffer information.
  *
  * This may be called from interrupt context.
  *
  * Return: newly allocated ah
  */
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr)
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata)
 {
 	struct rvt_ah *ah;
 	struct rvt_dev_info *dev = ib_to_rvt(pd->device);
@@ -51,7 +51,8 @@
 #include <rdma/rdma_vt.h>
 
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr);
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata);
 int rvt_destroy_ah(struct ib_ah *ibah);
 int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
@@ -1124,7 +1124,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 				 IB_MR_CHECK_SIG_STATUS, &mr_status);
 	if (ret) {
 		pr_err("ib_check_mr_status failed, ret %d\n", ret);
-		goto err;
+		/* Not a lot we can do, return ambiguous guard error */
+		*sector = 0;
+		return 0x1;
 	}
 
 	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1152,9 +1154,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
 	}
 
 	return 0;
-err:
-	/* Not alot we can do here, return ambiguous guard error */
-	return 0x1;
 }
 
 void iser_err_comp(struct ib_wc *wc, const char *type)
@@ -2473,14 +2473,15 @@ struct mlx5_ifc_xrc_srqc_bits {
 	u8         wq_signature[0x1];
 	u8         cont_srq[0x1];
-	u8         dbr_umem_valid[0x1];
+	u8         reserved_at_22[0x1];
 	u8         rlky[0x1];
 	u8         basic_cyclic_rcv_wqe[0x1];
 	u8         log_rq_stride[0x3];
 	u8         xrcd[0x18];
 
 	u8         page_offset[0x6];
-	u8         reserved_at_46[0x2];
+	u8         reserved_at_46[0x1];
+	u8         dbr_umem_valid[0x1];
 	u8         cqn[0x18];
 
 	u8         reserved_at_60[0x20];
@@ -6689,9 +6690,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
 	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
 
-	u8         reserved_at_280[0x40];
+	u8         reserved_at_280[0x60];
 
 	u8         xrc_srq_umem_valid[0x1];
-	u8         reserved_at_2c1[0x5bf];
+	u8         reserved_at_2e1[0x1f];
+
+	u8         reserved_at_300[0x580];
 
 	u8         pas[0][0x40];
 };
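For reading the mlx5_ifc hunks above: each u8 name[width] entry declares a field width in bits (0x18 is a 24-bit field), rows must pack into whole 32-bit dwords, and relocating dbr_umem_valid or resizing a reserved gap has to leave the total bit span unchanged so later fields keep their offsets. A quick arithmetic check of the corrected layout (plain C asserts, not the kernel's MLX5_GET/MLX5_SET machinery):

	#include <assert.h>

	int main(void)
	{
		/* xrc_srqc dword that now carries dbr_umem_valid:
		 * page_offset + reserved_at_46 + dbr_umem_valid + cqn */
		assert(0x6 + 0x1 + 0x1 + 0x18 == 32);

		/* create_xrc_srq_in tail: the old and new reserved/valid
		 * fields cover the same number of bits, so nothing after
		 * them moves. */
		assert(0x40 + 0x1 + 0x5bf == 0x60 + 0x1 + 0x1f + 0x580);
		return 0;
	}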