Commit 3023a1e9 authored by Kamal Heib, committed by Jason Gunthorpe

RDMA: Start use ib_device_ops

Make all the required changes to start using the ib_device_ops structure.
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 02a42f8e
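
The change is mechanical and identical at every call site in the diff below: a function pointer that previously lived directly on struct ib_device is now reached through the device's embedded struct ib_device_ops. A minimal sketch of the resulting call-site pattern (example_create_qp is a hypothetical illustration, not code from this commit):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical caller showing the ops-based dispatch this commit applies. */
static struct ib_qp *example_create_qp(struct ib_device *dev,
				       struct ib_pd *pd,
				       struct ib_qp_init_attr *attr,
				       struct ib_udata *udata)
{
	/* Optional verbs are probed by testing the ops member for NULL. */
	if (!dev->ops.create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	/* The call itself only gains the ".ops" hop. */
	return dev->ops.create_qp(pd, attr, udata);
}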
@@ -217,7 +217,7 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
 	if (rdma_cap_roce_gid_table(device, port_num) &&
 	    entry->state != GID_TABLE_ENTRY_INVALID)
-		device->del_gid(&entry->attr, &entry->context);
+		device->ops.del_gid(&entry->attr, &entry->context);
 	write_lock_irq(&table->rwlock);
@@ -324,7 +324,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
 		return -EINVAL;
 	}
 	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
-		ret = attr->device->add_gid(attr, &entry->context);
+		ret = attr->device->ops.add_gid(attr, &entry->context);
 		if (ret) {
 			dev_err(&attr->device->dev,
 				"%s GID add failed port=%d index=%d\n",
@@ -548,8 +548,8 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 	unsigned long mask;
 	int ret;
-	if (ib_dev->get_netdev) {
-		idev = ib_dev->get_netdev(ib_dev, port);
+	if (ib_dev->ops.get_netdev) {
+		idev = ib_dev->ops.get_netdev(ib_dev, port);
 		if (idev && attr->ndev != idev) {
 			union ib_gid default_gid;
@@ -1296,9 +1296,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
 	mutex_lock(&table->lock);
 	for (i = 0; i < gid_tbl_len; ++i) {
-		if (!device->query_gid)
+		if (!device->ops.query_gid)
 			continue;
-		ret = device->query_gid(device, port, i, &gid_attr.gid);
+		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
 		if (ret) {
 			dev_warn(&device->dev,
 				 "query_gid failed (%d) for index %d\n", ret,
......
@@ -215,10 +215,10 @@ static inline int ib_security_modify_qp(struct ib_qp *qp,
 					int qp_attr_mask,
 					struct ib_udata *udata)
 {
-	return qp->device->modify_qp(qp->real_qp,
-				     qp_attr,
-				     qp_attr_mask,
-				     udata);
+	return qp->device->ops.modify_qp(qp->real_qp,
+					 qp_attr,
+					 qp_attr_mask,
+					 udata);
 }
 static inline int ib_create_qp_security(struct ib_qp *qp,
@@ -280,10 +280,10 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
 {
 	struct ib_qp *qp;
-	if (!dev->create_qp)
+	if (!dev->ops.create_qp)
 		return ERR_PTR(-EOPNOTSUPP);
-	qp = dev->create_qp(pd, attr, udata);
+	qp = dev->ops.create_qp(pd, attr, udata);
 	if (IS_ERR(qp))
 		return qp;
......
@@ -145,7 +145,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
 	struct ib_cq *cq;
 	int ret = -ENOMEM;
-	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
+	cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
 	if (IS_ERR(cq))
 		return cq;
@@ -193,7 +193,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
 	kfree(cq->wc);
 	rdma_restrack_del(&cq->res);
 out_destroy_cq:
-	cq->device->destroy_cq(cq);
+	cq->device->ops.destroy_cq(cq);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(__ib_alloc_cq);
@@ -225,7 +225,7 @@ void ib_free_cq(struct ib_cq *cq)
 	kfree(cq->wc);
 	rdma_restrack_del(&cq->res);
-	ret = cq->device->destroy_cq(cq);
+	ret = cq->device->ops.destroy_cq(cq);
 	WARN_ON_ONCE(ret);
 }
 EXPORT_SYMBOL(ib_free_cq);
......
@@ -211,8 +211,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 	device = pd->device;
-	if (!device->alloc_fmr || !device->dealloc_fmr ||
-	    !device->map_phys_fmr || !device->unmap_fmr) {
+	if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
+	    !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
 		dev_info(&device->dev, "Device does not support FMRs\n");
 		return ERR_PTR(-ENOSYS);
 	}
......
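
The same four-pointer FMR capability test recurs below in the iser, srp, and rds hunks; a hedged sketch of how it could be factored against the new layout (ib_has_fmr_support is hypothetical and not part of this commit):

/* Hypothetical helper, not in this commit: FMRs are usable only when
 * the driver wires up all four optional ops.
 */
static inline bool ib_has_fmr_support(const struct ib_device *dev)
{
	return dev->ops.alloc_fmr && dev->ops.dealloc_fmr &&
	       dev->ops.map_phys_fmr && dev->ops.unmap_fmr;
}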
@@ -888,10 +888,10 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	}
 	/* No GRH for DR SMP */
-	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
-				  (const struct ib_mad_hdr *)smp, mad_size,
-				  (struct ib_mad_hdr *)mad_priv->mad,
-				  &mad_size, &out_mad_pkey_index);
+	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
+				      (const struct ib_mad_hdr *)smp, mad_size,
+				      (struct ib_mad_hdr *)mad_priv->mad,
+				      &mad_size, &out_mad_pkey_index);
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -2305,14 +2305,12 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	}
 	/* Give driver "right of first refusal" on incoming MAD */
-	if (port_priv->device->process_mad) {
-		ret = port_priv->device->process_mad(port_priv->device, 0,
-						     port_priv->port_num,
-						     wc, &recv->grh,
-						     (const struct ib_mad_hdr *)recv->mad,
-						     recv->mad_size,
-						     (struct ib_mad_hdr *)response->mad,
-						     &mad_size, &resp_mad_pkey_index);
+	if (port_priv->device->ops.process_mad) {
+		ret = port_priv->device->ops.process_mad(
+			port_priv->device, 0, port_priv->port_num, wc,
+			&recv->grh, (const struct ib_mad_hdr *)recv->mad,
+			recv->mad_size, (struct ib_mad_hdr *)response->mad,
+			&mad_size, &resp_mad_pkey_index);
 		if (opa)
 			wc->pkey_index = resp_mad_pkey_index;
......
@@ -259,8 +259,8 @@ static int fill_port_info(struct sk_buff *msg,
 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
 		return -EMSGSIZE;
-	if (device->get_netdev)
-		netdev = device->get_netdev(device, port);
+	if (device->ops.get_netdev)
+		netdev = device->ops.get_netdev(device, port);
 	if (netdev && net_eq(dev_net(netdev), net)) {
 		ret = nla_put_u32(msg,
......
@@ -55,7 +55,7 @@ static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
 {
 	/* C14-9:3 -- We're at the end of the DR segment of path */
 	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
-	return (device->process_mad &&
+	return (device->ops.process_mad &&
 		!opa_get_smp_direction(smp) &&
 		(smp->hop_ptr == smp->hop_cnt + 1)) ?
 		IB_SMI_HANDLE : IB_SMI_DISCARD;
@@ -70,7 +70,7 @@ static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *
 {
 	/* C14-13:3 -- We're at the end of the DR segment of path */
 	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
-	return (device->process_mad &&
+	return (device->ops.process_mad &&
 		opa_get_smp_direction(smp) &&
 		!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
 }
......
@@ -820,8 +820,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 	 */
 	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
 		uverbs_user_mmap_disassociate(ufile);
-		if (ib_dev->disassociate_ucontext)
-			ib_dev->disassociate_ucontext(ucontext);
+		if (ib_dev->ops.disassociate_ucontext)
+			ib_dev->ops.disassociate_ucontext(ucontext);
 	}
 	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
@@ -833,7 +833,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 	 * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
 	 * the error return.
 	 */
-	ret = ib_dev->dealloc_ucontext(ucontext);
+	ret = ib_dev->ops.dealloc_ucontext(ucontext);
 	WARN_ON(ret);
 	ufile->ucontext = NULL;
......
@@ -626,10 +626,10 @@ int ib_security_modify_qp(struct ib_qp *qp,
 	}
 	if (!ret)
-		ret = real_qp->device->modify_qp(real_qp,
-						 qp_attr,
-						 qp_attr_mask,
-						 udata);
+		ret = real_qp->device->ops.modify_qp(real_qp,
+						     qp_attr,
+						     qp_attr_mask,
+						     udata);
 	if (new_pps) {
 		/* Clean up the lists and free the appropriate
......
@@ -67,7 +67,7 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
 {
 	/* C14-9:3 -- We're at the end of the DR segment of path */
 	/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
-	return ((device->process_mad &&
+	return ((device->ops.process_mad &&
 		 !ib_get_smp_direction(smp) &&
 		 (smp->hop_ptr == smp->hop_cnt + 1)) ?
 		 IB_SMI_HANDLE : IB_SMI_DISCARD);
@@ -82,7 +82,7 @@ static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
 {
 	/* C14-13:3 -- We're at the end of the DR segment of path */
 	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
-	return ((device->process_mad &&
+	return ((device->ops.process_mad &&
 		 ib_get_smp_direction(smp) &&
 		 !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
 }
......
@@ -462,7 +462,7 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
 	u16 out_mad_pkey_index = 0;
 	ssize_t ret;
-	if (!dev->process_mad)
+	if (!dev->ops.process_mad)
 		return -ENOSYS;
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -481,11 +481,11 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
 	if (attr != IB_PMA_CLASS_PORT_INFO)
 		in_mad->data[41] = port_num;	/* PortSelect field */
-	if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
-			      port_num, NULL, NULL,
-			      (const struct ib_mad_hdr *)in_mad, mad_size,
-			      (struct ib_mad_hdr *)out_mad, &mad_size,
-			      &out_mad_pkey_index) &
+	if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
+				  port_num, NULL, NULL,
+				  (const struct ib_mad_hdr *)in_mad, mad_size,
+				  (struct ib_mad_hdr *)out_mad, &mad_size,
+				  &out_mad_pkey_index) &
 	     (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
 	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
 		ret = -EINVAL;
@@ -786,7 +786,7 @@ static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
 	if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
 		return 0;
-	ret = dev->get_hw_stats(dev, stats, port_num, index);
+	ret = dev->ops.get_hw_stats(dev, stats, port_num, index);
 	if (ret < 0)
 		return ret;
 	if (ret == stats->num_counters)
@@ -946,7 +946,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 	struct rdma_hw_stats *stats;
 	int i, ret;
-	stats = device->alloc_hw_stats(device, port_num);
+	stats = device->ops.alloc_hw_stats(device, port_num);
 	if (!stats)
 		return;
@@ -964,8 +964,8 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 	if (!hsag)
 		goto err_free_stats;
-	ret = device->get_hw_stats(device, stats, port_num,
-				   stats->num_counters);
+	ret = device->ops.get_hw_stats(device, stats, port_num,
+				       stats->num_counters);
 	if (ret != stats->num_counters)
 		goto err_free_hsag;
@@ -1057,7 +1057,7 @@ static int add_port(struct ib_device *device, int port_num,
 		goto err_put;
 	}
-	if (device->process_mad) {
+	if (device->ops.process_mad) {
 		p->pma_table = get_counter_table(device, port_num);
 		ret = sysfs_create_group(&p->kobj, p->pma_table);
 		if (ret)
@@ -1124,7 +1124,7 @@ static int add_port(struct ib_device *device, int port_num,
 	 * port, so holder should be device. Therefore skip per port counter
 	 * initialization.
 	 */
-	if (device->alloc_hw_stats && port_num)
+	if (device->ops.alloc_hw_stats && port_num)
 		setup_hw_stats(device, p, port_num);
 	list_add_tail(&p->kobj.entry, &device->port_list);
@@ -1245,7 +1245,7 @@ static ssize_t node_desc_store(struct device *device,
 	struct ib_device_modify desc = {};
 	int ret;
-	if (!dev->modify_device)
+	if (!dev->ops.modify_device)
 		return -EIO;
 	memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
@@ -1341,7 +1341,7 @@ int ib_device_register_sysfs(struct ib_device *device,
 		}
 	}
-	if (device->alloc_hw_stats)
+	if (device->ops.alloc_hw_stats)
 		setup_hw_stats(device, NULL, 0);
 	return 0;
......
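
The sysfs counter path above caches hardware stats and only re-reads them through ops.get_hw_stats once their lifespan expires; a condensed, illustrative sketch of that refresh logic, based on the update_hw_stats() hunk above:

/* Condensed from update_hw_stats() above; illustrative only. */
static int example_refresh_stats(struct ib_device *dev,
				 struct rdma_hw_stats *stats,
				 u8 port_num, int index)
{
	int ret;

	/* Serve cached counters until their lifespan runs out. */
	if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
		return 0;

	ret = dev->ops.get_hw_stats(dev, stats, port_num, index);
	if (ret < 0)
		return ret;

	stats->timestamp = jiffies;
	return 0;
}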
@@ -1242,7 +1242,7 @@ static void ib_ucm_add_one(struct ib_device *device)
 	dev_t base;
 	struct ib_ucm_device *ucm_dev;
-	if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
+	if (!device->ops.alloc_ucontext || !rdma_cap_ib_cm(device, 1))
 		return;
 	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
......
@@ -220,7 +220,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	if (ret)
 		goto err;
-	ucontext = ib_dev->alloc_ucontext(ib_dev, &attrs->driver_udata);
+	ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
 	if (IS_ERR(ucontext)) {
 		ret = PTR_ERR(ucontext);
 		goto err_alloc;
@@ -282,7 +282,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	put_unused_fd(resp.async_fd);
 err_free:
-	ib_dev->dealloc_ucontext(ucontext);
+	ib_dev->ops.dealloc_ucontext(ucontext);
 err_alloc:
 	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
@@ -457,7 +457,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
-	pd = ib_dev->alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
+	pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
 	if (IS_ERR(pd)) {
 		ret = PTR_ERR(pd);
 		goto err;
@@ -634,8 +634,8 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 	}
 	if (!xrcd) {
-		xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context,
-					  &attrs->driver_udata);
+		xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
+					      &attrs->driver_udata);
 		if (IS_ERR(xrcd)) {
 			ret = PTR_ERR(xrcd);
 			goto err;
@@ -774,8 +774,9 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 		}
 	}
-	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
-				     cmd.access_flags, &attrs->driver_udata);
+	mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
+					 cmd.access_flags,
+					 &attrs->driver_udata);
 	if (IS_ERR(mr)) {
 		ret = PTR_ERR(mr);
 		goto err_put;
@@ -864,9 +865,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
 	}
 	old_pd = mr->pd;
-	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
-					cmd.hca_va, cmd.access_flags, pd,
-					&attrs->driver_udata);
+	ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
+					    cmd.length, cmd.hca_va,
+					    cmd.access_flags, pd,
+					    &attrs->driver_udata);
 	if (!ret) {
 		if (cmd.flags & IB_MR_REREG_PD) {
 			atomic_inc(&pd->usecnt);
@@ -929,7 +931,7 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 		goto err_free;
 	}
-	mw = pd->device->alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
+	mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
 	if (IS_ERR(mw)) {
 		ret = PTR_ERR(mw);
 		goto err_put;
@@ -1043,8 +1045,8 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 	attr.comp_vector = cmd->comp_vector;
 	attr.flags = cmd->flags;
-	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
-			       &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
+				   &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_file;
@@ -1144,7 +1146,7 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
 	if (!cq)
 		return -EINVAL;
-	ret = cq->device->resize_cq(cq, cmd.cqe, &attrs->driver_udata);
+	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
 	if (ret)
 		goto out;
@@ -2188,7 +2190,7 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 	}
 	resp.bad_wr = 0;
-	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
+	ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
 	if (ret)
 		for (next = wr; next; next = next->next) {
 			++resp.bad_wr;
@@ -2341,7 +2343,7 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
 	}
 	resp.bad_wr = 0;
-	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
+	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
 	uobj_put_obj_read(qp);
 	if (ret) {
@@ -2391,7 +2393,7 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
 	}
 	resp.bad_wr = 0;
-	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
+	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
 	uobj_put_obj_read(srq);
@@ -2961,7 +2963,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
-	wq = pd->device->create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+	wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
 	if (IS_ERR(wq)) {
 		err = PTR_ERR(wq);
 		goto err_put_cq;
@@ -3061,8 +3063,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
 		wq_attr.flags = cmd.flags;
 		wq_attr.flags_mask = cmd.flags_mask;
 	}
-	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask,
-				    &attrs->driver_udata);
+	ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
+					&attrs->driver_udata);
 	uobj_put_obj_read(wq);
 	return ret;
 }
@@ -3135,8 +3137,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
 	init_attr.ind_tbl = wqs;
-	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr,
-						   &attrs->driver_udata);
+	rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
+						       &attrs->driver_udata);
 	if (IS_ERR(rwq_ind_tbl)) {
 		err = PTR_ERR(rwq_ind_tbl);
@@ -3323,8 +3325,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 		goto err_free;
 	}
-	flow_id = qp->device->create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER,
-					  &attrs->driver_udata);
+	flow_id = qp->device->ops.create_flow(
+		qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
 	if (IS_ERR(flow_id)) {
 		err = PTR_ERR(flow_id);
@@ -3346,7 +3348,7 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 	kfree(kern_flow_attr);
 	return uobj_alloc_commit(uobj);
 err_copy:
-	if (!qp->device->destroy_flow(flow_id))
+	if (!qp->device->ops.destroy_flow(flow_id))
 		atomic_dec(&qp->usecnt);
 err_free:
 	ib_uverbs_flow_resources_free(uflow_res);
@@ -3441,7 +3443,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 	obj->uevent.events_reported = 0;
 	INIT_LIST_HEAD(&obj->uevent.event_list);
-	srq = pd->device->create_srq(pd, &attr, udata);
+	srq = pd->device->ops.create_srq(pd, &attr, udata);
 	if (IS_ERR(srq)) {
 		ret = PTR_ERR(srq);
 		goto err_put;
@@ -3563,8 +3565,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
 	attr.max_wr = cmd.max_wr;
 	attr.srq_limit = cmd.srq_limit;
-	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask,
-				      &attrs->driver_udata);
+	ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
+					  &attrs->driver_udata);
 	uobj_put_obj_read(srq);
@@ -3652,7 +3654,7 @@ static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
 	if (cmd.reserved)
 		return -EINVAL;
-	err = ib_dev->query_device(ib_dev, &attr, &attrs->driver_udata);
+	err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
 	if (err)
 		return err;
......
@@ -106,7 +106,7 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
 	struct ib_pd *pd = mw->pd;
 	int ret;
-	ret = mw->device->dealloc_mw(mw);
+	ret = mw->device->ops.dealloc_mw(mw);
 	if (!ret)
 		atomic_dec(&pd->usecnt);
 	return ret;
@@ -197,7 +197,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
 	ib_dev = srcu_dereference(file->device->ib_dev,
 				  &file->device->disassociate_srcu);
-	if (ib_dev && !ib_dev->disassociate_ucontext)
+	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
 		module_put(ib_dev->owner);
 	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@@ -774,7 +774,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto out;
 	}
-	ret = ucontext->device->mmap(ucontext, vma);
+	ret = ucontext->device->ops.mmap(ucontext, vma);
 out:
 	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
 	return ret;
@@ -1036,7 +1036,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 	/* In case IB device supports disassociate ucontext, there is no hard
 	 * dependency between uverbs device and its low level device.
 	 */
-	module_dependent = !(ib_dev->disassociate_ucontext);
+	module_dependent = !(ib_dev->ops.disassociate_ucontext);
 	if (module_dependent) {
 		if (!try_module_get(ib_dev->owner)) {
@@ -1203,7 +1203,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	struct ib_uverbs_device *uverbs_dev;
 	int ret;
-	if (!device->alloc_ucontext)
+	if (!device->ops.alloc_ucontext)
 		return;
 	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
@@ -1249,7 +1249,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
 	cdev_init(&uverbs_dev->cdev,
-		  device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
+		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
 	uverbs_dev->cdev.owner = THIS_MODULE;
 	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
@@ -1337,7 +1337,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
 	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
 	ida_free(&uverbs_ida, uverbs_dev->devnum);
-	if (device->disassociate_ucontext) {
+	if (device->ops.disassociate_ucontext) {
 		/* We disassociate HW resources and immediately return.
 		 * Userspace will see an EIO errno for all future access.
 		 * Upon returning, ib_device may be freed internally and is not
......
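
The disassociate_ucontext checks above all encode one lifetime rule: a driver that can forcibly detach user contexts does not need to stay loaded while a uverbs file is open. A sketch of that decision, condensed from the ib_uverbs_open() hunk (example_pin_driver is illustrative only):

#include <linux/module.h>
#include <rdma/ib_verbs.h>

/* Illustrative condensation of the module-pinning logic above. */
static int example_pin_driver(struct ib_device *ib_dev)
{
	bool module_dependent = !ib_dev->ops.disassociate_ucontext;

	/* Without disassociate support, the driver module must be
	 * pinned for as long as the uverbs file stays open.
	 */
	if (module_dependent && !try_module_get(ib_dev->owner))
		return -ENODEV;
	return 0;
}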
@@ -54,7 +54,7 @@ static int uverbs_free_flow(struct ib_uobject *uobject,
 	struct ib_qp *qp = flow->qp;
 	int ret;
-	ret = flow->device->destroy_flow(flow);
+	ret = flow->device->ops.destroy_flow(flow);
 	if (!ret) {
 		if (qp)
 			atomic_dec(&qp->usecnt);
......
@@ -44,7 +44,7 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
-	return counters->device->destroy_counters(counters);
+	return counters->device->ops.destroy_counters(counters);
 }
 static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
@@ -61,10 +61,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
 	 * have the ability to remove methods from parse tree once
 	 * such condition is met.
 	 */
-	if (!ib_dev->create_counters)
+	if (!ib_dev->ops.create_counters)
 		return -EOPNOTSUPP;
-	counters = ib_dev->create_counters(ib_dev, attrs);
+	counters = ib_dev->ops.create_counters(ib_dev, attrs);
 	if (IS_ERR(counters)) {
 		ret = PTR_ERR(counters);
 		goto err_create_counters;
@@ -90,7 +90,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
 		uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
 	int ret;
-	if (!counters->device->read_counters)
+	if (!counters->device->ops.read_counters)
 		return -EOPNOTSUPP;
 	if (!atomic_read(&counters->usecnt))
@@ -109,7 +109,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
 	if (IS_ERR(read_attr.counters_buff))
 		return PTR_ERR(read_attr.counters_buff);
-	ret = counters->device->read_counters(counters, &read_attr, attrs);
+	ret = counters->device->ops.read_counters(counters, &read_attr, attrs);
 	if (ret)
 		return ret;
......
@@ -71,7 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	struct ib_uverbs_completion_event_file *ev_file = NULL;
 	struct ib_uobject *ev_file_uobj;
-	if (!ib_dev->create_cq || !ib_dev->destroy_cq)
+	if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
 		return -EOPNOTSUPP;
 	ret = uverbs_copy_from(&attr.comp_vector, attrs,
@@ -110,8 +110,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	INIT_LIST_HEAD(&obj->comp_list);
 	INIT_LIST_HEAD(&obj->async_list);
-	cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context,
-			       &attrs->driver_udata);
+	cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
+				   &attrs->driver_udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
 		goto err_event_file;
......
@@ -43,7 +43,7 @@ static int uverbs_free_dm(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
-	return dm->device->dealloc_dm(dm);
+	return dm->device->ops.dealloc_dm(dm);
 }
 static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
@@ -57,7 +57,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
 	struct ib_dm *dm;
 	int ret;
-	if (!ib_dev->alloc_dm)
+	if (!ib_dev->ops.alloc_dm)
 		return -EOPNOTSUPP;
 	ret = uverbs_copy_from(&attr.length, attrs,
@@ -70,7 +70,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(
 	if (ret)
 		return ret;
-	dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs);
+	dm = ib_dev->ops.alloc_dm(ib_dev, uobj->context, &attr, attrs);
 	if (IS_ERR(dm))
 		return PTR_ERR(dm);
......
@@ -43,7 +43,7 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject,
 	if (ret)
 		return ret;
-	return action->device->destroy_flow_action(action);
+	return action->device->ops.destroy_flow_action(action);
 }
 static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs,
@@ -313,7 +313,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
 	struct ib_flow_action *action;
 	struct ib_flow_action_esp_attr esp_attr = {};
-	if (!ib_dev->create_flow_action_esp)
+	if (!ib_dev->ops.create_flow_action_esp)
 		return -EOPNOTSUPP;
 	ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
@@ -321,7 +321,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
 		return ret;
 	/* No need to check as this attribute is marked as MANDATORY */
-	action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
+	action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
+						    attrs);
 	if (IS_ERR(action))
 		return PTR_ERR(action);
@@ -340,7 +341,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
 	int ret;
 	struct ib_flow_action_esp_attr esp_attr = {};
-	if (!action->device->modify_flow_action_esp)
+	if (!action->device->ops.modify_flow_action_esp)
 		return -EOPNOTSUPP;
 	ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true);
@@ -350,8 +351,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(
 	if (action->type != IB_FLOW_ACTION_ESP)
 		return -EINVAL;
-	return action->device->modify_flow_action_esp(action, &esp_attr.hdr,
-						      attrs);
+	return action->device->ops.modify_flow_action_esp(action,
+							  &esp_attr.hdr,
+							  attrs);
 }
 static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
......
@@ -54,7 +54,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
 	struct ib_mr *mr;
 	int ret;
-	if (!ib_dev->reg_dm_mr)
+	if (!ib_dev->ops.reg_dm_mr)
 		return -EOPNOTSUPP;
 	ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET);
@@ -83,7 +83,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
 	    attr.length > dm->length - attr.offset)
 		return -EINVAL;
-	mr = pd->device->reg_dm_mr(pd, dm, &attr, attrs);
+	mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs);
 	if (IS_ERR(mr))
 		return PTR_ERR(mr);
......
@@ -300,7 +300,8 @@ static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev,
 			return 0;
 		case UAPI_DEF_IS_SUPPORTED_DEV_FN: {
-			void **ibdev_fn = (void *)ibdev + def->needs_fn_offset;
+			void **ibdev_fn =
+				(void *)(&ibdev->ops) + def->needs_fn_offset;
 			if (*ibdev_fn)
 				continue;
......
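
The uapi_merge_def() hunk above works because needs_fn_offset is now an offset into struct ib_device_ops, so the supported-function test is plain pointer arithmetic rebased on &ibdev->ops. A simplified sketch of the idea (example_fn_is_supported is not a kernel function):

/* needs_fn_offset == offsetof(struct ib_device_ops, some_fn); the test
 * rebases it on the embedded ops and checks the slot for NULL.
 */
static bool example_fn_is_supported(struct ib_device *ibdev,
				    size_t needs_fn_offset)
{
	void **fn = (void *)(&ibdev->ops) + needs_fn_offset;

	return *fn != NULL;
}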
......
@@ -3478,7 +3478,7 @@ static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
 	/* Need to free the Last Streaming Mode Message */
 	if (iwqp->ietf_mem.va) {
 		if (iwqp->lsmm_mr)
-			iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
+			iwibdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr);
 		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
 	}
 }
......
@@ -849,7 +849,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
 	for (i = 1; i <= dev->num_ports; ++i) {
-		if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
+		if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
 			ret = -EFAULT;
 			goto err_unregister;
 		}
......
@@ -150,7 +150,7 @@ static int get_port_state(struct ib_device *ibdev,
 	int ret;
 	memset(&attr, 0, sizeof(attr));
-	ret = ibdev->query_port(ibdev, port_num, &attr);
+	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
 	if (!ret)
 		*state = attr.state;
 	return ret;
......
@@ -3033,7 +3033,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
 	/* Need to free the Last Streaming Mode Message */
 	if (nesqp->ietf_frame) {
 		if (nesqp->lsmm_mr)
-			nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
+			nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
 		pci_free_consistent(nesdev->pcidev,
 				    nesqp->private_data_len + nesqp->ietf_frame_size,
 				    nesqp->ietf_frame, nesqp->ietf_frame_pbase);
......
@@ -456,31 +456,31 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 		 * rdmavt does not support modify device currently; drivers
 		 * must provide it.
 		 */
-		if (!rdi->ibdev.modify_device)
+		if (!rdi->ibdev.ops.modify_device)
 			return -EOPNOTSUPP;
 		break;
 	case QUERY_PORT:
-		if (!rdi->ibdev.query_port)
+		if (!rdi->ibdev.ops.query_port)
 			if (!rdi->driver_f.query_port_state)
 				return -EINVAL;
 		break;
 	case MODIFY_PORT:
-		if (!rdi->ibdev.modify_port)
+		if (!rdi->ibdev.ops.modify_port)
 			if (!rdi->driver_f.cap_mask_chg ||
 			    !rdi->driver_f.shut_down_port)
 				return -EINVAL;
 		break;
 	case QUERY_GID:
-		if (!rdi->ibdev.query_gid)
+		if (!rdi->ibdev.ops.query_gid)
 			if (!rdi->driver_f.get_guid_be)
 				return -EINVAL;
 		break;
 	case CREATE_QP:
-		if (!rdi->ibdev.create_qp)
+		if (!rdi->ibdev.ops.create_qp)
 			if (!rdi->driver_f.qp_priv_alloc ||
 			    !rdi->driver_f.qp_priv_free ||
 			    !rdi->driver_f.notify_qp_reset ||
@@ -491,7 +491,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 		break;
 	case MODIFY_QP:
-		if (!rdi->ibdev.modify_qp)
+		if (!rdi->ibdev.ops.modify_qp)
 			if (!rdi->driver_f.notify_qp_reset ||
 			    !rdi->driver_f.schedule_send ||
 			    !rdi->driver_f.get_pmtu_from_attr ||
@@ -505,7 +505,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 		break;
 	case DESTROY_QP:
-		if (!rdi->ibdev.destroy_qp)
+		if (!rdi->ibdev.ops.destroy_qp)
 			if (!rdi->driver_f.qp_priv_free ||
 			    !rdi->driver_f.notify_qp_reset ||
 			    !rdi->driver_f.flush_qp_waiters ||
break;
case POST_SEND:
if (!rdi->ibdev.post_send)
if (!rdi->ibdev.ops.post_send)
if (!rdi->driver_f.schedule_send ||
!rdi->driver_f.do_send ||
!rdi->post_parms)
......
@@ -2453,8 +2453,8 @@ static struct net_device *ipoib_add_port(const char *format,
 		return ERR_PTR(result);
 	}
-	if (hca->rdma_netdev_get_params) {
-		int rc = hca->rdma_netdev_get_params(hca, port,
+	if (hca->ops.rdma_netdev_get_params) {
+		int rc = hca->ops.rdma_netdev_get_params(hca, port,
 						     RDMA_NETDEV_IPOIB,
 						     &params);
......
@@ -77,8 +77,8 @@ int iser_assign_reg_ops(struct iser_device *device)
 	struct ib_device *ib_dev = device->ib_device;
 	/* Assign function handles - based on FMR support */
-	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
-	    ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
+	if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
+	    ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
 		iser_info("FMR supported, using FMR for registration\n");
 		device->reg_ops = &fmr_ops;
 	} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
......
@@ -330,10 +330,10 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
 	struct rdma_netdev *rn;
 	int rc;
-	netdev = ibdev->alloc_rdma_netdev(ibdev, port_num,
-					  RDMA_NETDEV_OPA_VNIC,
-					  "veth%d", NET_NAME_UNKNOWN,
-					  ether_setup);
+	netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
+					      RDMA_NETDEV_OPA_VNIC,
+					      "veth%d", NET_NAME_UNKNOWN,
+					      ether_setup);
 	if (!netdev)
 		return ERR_PTR(-ENOMEM);
 	else if (IS_ERR(netdev))
......
@@ -4063,8 +4063,10 @@ static void srp_add_one(struct ib_device *device)
 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
 					  max_pages_per_mr);
-	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
-			    device->map_phys_fmr && device->unmap_fmr);
+	srp_dev->has_fmr = (device->ops.alloc_fmr &&
+			    device->ops.dealloc_fmr &&
+			    device->ops.map_phys_fmr &&
+			    device->ops.unmap_fmr);
 	srp_dev->has_fr = (attr->device_cap_flags &
 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
 	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
......
@@ -1724,7 +1724,7 @@ static struct smbd_connection *_smbd_get_connection(
 		info->responder_resources);
 	/* Need to send IRD/ORD in private data for iWARP */
-	info->id->device->get_port_immutable(
+	info->id->device->ops.get_port_immutable(
 		info->id->device, info->id->port_num, &port_immutable);
 	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
 		ird_ord_hdr[0] = info->responder_resources;
......
......
@@ -419,10 +419,10 @@ struct uapi_definition {
 		.kind = UAPI_DEF_IS_SUPPORTED_DEV_FN,                          \
 		.scope = UAPI_SCOPE_OBJECT,                                    \
 		.needs_fn_offset =                                             \
-			offsetof(struct ib_device, ibdev_fn) +                 \
+			offsetof(struct ib_device_ops, ibdev_fn) +             \
 			BUILD_BUG_ON_ZERO(                                     \
-				sizeof(((struct ib_device *)0)->ibdev_fn) !=   \
-				sizeof(void *)),                               \
+				sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
+				sizeof(void *)),                               \
 	}
 /*
@@ -434,10 +434,10 @@ struct uapi_definition {
 		.kind = UAPI_DEF_IS_SUPPORTED_DEV_FN,                          \
 		.scope = UAPI_SCOPE_METHOD,                                    \
 		.needs_fn_offset =                                             \
-			offsetof(struct ib_device, ibdev_fn) +                 \
+			offsetof(struct ib_device_ops, ibdev_fn) +             \
 			BUILD_BUG_ON_ZERO(                                     \
-				sizeof(((struct ib_device *)0)->ibdev_fn) !=   \
-				sizeof(void *)),                               \
+				sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
+				sizeof(void *)),                               \
 	}
 /* Call a function to determine if the entire object is supported or not */
......
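
In the header hunks above, BUILD_BUG_ON_ZERO() keeps the offsetof() arithmetic honest at compile time: it evaluates to 0 when the named ops member is pointer-sized (so the stored offset can later be dereferenced as void **) and breaks the build otherwise. A reduced sketch of the same guard (EXAMPLE_NEEDS_FN_OFFSET is hypothetical):

/* Reduced form of the guard used by the macros above; the
 * BUILD_BUG_ON_ZERO() term is 0 or a compile error, so it can be
 * summed into the offset expression.
 */
#define EXAMPLE_NEEDS_FN_OFFSET(ibdev_fn)				\
	(offsetof(struct ib_device_ops, ibdev_fn) +			\
	 BUILD_BUG_ON_ZERO(sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
			   sizeof(void *)))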
@@ -148,8 +148,8 @@ static void rds_ib_add_one(struct ib_device *device)
 	has_fr = (device->attrs.device_cap_flags &
 		  IB_DEVICE_MEM_MGT_EXTENSIONS);
-	has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
-		   device->map_phys_fmr && device->unmap_fmr);
+	has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
+		   device->ops.map_phys_fmr && device->ops.unmap_fmr);
 	rds_ibdev->use_fastreg = (has_fr && !has_fmr);
 	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
......
@@ -41,7 +41,7 @@ enum {
 bool
 fmr_is_supported(struct rpcrdma_ia *ia)
 {
-	if (!ia->ri_device->alloc_fmr) {
+	if (!ia->ri_device->ops.alloc_fmr) {
 		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
 			ia->ri_device->name);
 		return false;
......