Commit 0acb0cc7 authored by Dennis Dalessandro, committed by Doug Ledford

IB/rdmavt: Initialize and teardown of qpn table

Add initialization and teardown of the QPN table and its bitmap pages. Drivers
can still provide this functionality themselves by setting the
RVT_FLAG_QP_INIT_DRIVER bit.
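For illustration only (not part of this patch): a driver that wants to keep
doing its own QP setup would set that flag on its rvt_dev_info before calling
rvt_register_device(); the mydrv names below are hypothetical.

        /*
         * Hypothetical driver code; only RVT_FLAG_QP_INIT_DRIVER,
         * rvt_dev_info and rvt_register_device() come from rdmavt.
         */
        static int mydrv_register(struct mydrv_devdata *dd)
        {
                struct rvt_dev_info *rdi = &dd->rdi; /* rvt_dev_info embedded in driver data */

                /* Tell rdmavt to skip rvt_driver_qp_init(); the driver owns QP setup. */
                rdi->flags |= RVT_FLAG_QP_INIT_DRIVER;

                return rvt_register_device(rdi);
        }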
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent b4e64397
@@ -45,8 +45,205 @@
 *
 */
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include "vt.h"
#include "qp.h" #include "qp.h"
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end > rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table instead of keeping their own. Mark the bitmap
         * for the reserved range up front. The reserved range must be *after*
         * the range which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i < rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
                rvt_pr_info(rdi, "Driver is doing QP init.\n");
                return 0;
        }

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc(rdi->qp_dev->qp_table_size *
                        sizeof(*rdi->qp_dev->qp_table),
                        GFP_KERNEL);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}
/**
 * free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned long flags;
        struct rvt_qp *qp;
        unsigned n, qp_inuse = 0;
        spinlock_t *ql; /* work around too long line below */

        rdi->driver_f.free_all_qps(rdi);

        if (!rdi->qp_dev)
                return 0;

        ql = &rdi->qp_dev->qpt_lock;
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
        for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
                                               lockdep_is_held(ql));
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

                /* Walk this bucket's chain and count every QP still hanging off it. */
                while (qp) {
                        qp_inuse++;
                        qp = rcu_dereference_protected(qp->next,
                                                       lockdep_is_held(ql));
                }
        }
        spin_unlock_irqrestore(ql, flags);
        synchronize_rcu();
        return qp_inuse;
}
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
...
@@ -50,6 +50,8 @@
#include <rdma/rdma_vt.h>
int rvt_driver_qp_init(struct rvt_dev_info *rdi);
void rvt_qp_exit(struct rvt_dev_info *rdi);
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);
...
@@ -223,9 +223,23 @@ int rvt_register_device(struct rvt_dev_info *rdi)
            (!rdi->driver_f.get_card_name) ||
            (!rdi->driver_f.get_pci_dev) ||
            (!rdi->driver_f.check_ah)) {
                pr_err("Driver not supporting req func\n");
                return -EINVAL;
        }
        if (!rdi->dparms.nports) {
                rvt_pr_err(rdi, "Driver says it has no ports.\n");
                return -EINVAL;
        }

        rdi->ports = kcalloc(rdi->dparms.nports,
                             sizeof(struct rvt_ibport **),
                             GFP_KERNEL);
        if (!rdi->ports) {
                rvt_pr_err(rdi, "Could not allocate port mem.\n");
                return -ENOMEM;
        }
        /* Once we get past here we can use the rvt_pr macros */

        /* Dev Ops */
@@ -240,6 +254,12 @@ int rvt_register_device(struct rvt_dev_info *rdi)
        CHECK_DRIVER_OVERRIDE(rdi, get_port_immutable);

        /* Queue Pairs */
        ret = rvt_driver_qp_init(rdi);
        if (ret) {
                pr_err("Error in driver QP init.\n");
                return -EINVAL;
        }
        CHECK_DRIVER_OVERRIDE(rdi, create_qp);
        CHECK_DRIVER_OVERRIDE(rdi, modify_qp);
        CHECK_DRIVER_OVERRIDE(rdi, destroy_qp);
@@ -300,19 +320,6 @@ int rvt_register_device(struct rvt_dev_info *rdi)
        spin_lock_init(&rdi->n_pds_lock);
        rdi->n_pds_allocated = 0;
        if (rdi->dparms.nports) {
                rdi->ports = kcalloc(rdi->dparms.nports,
                                     sizeof(struct rvt_ibport **),
                                     GFP_KERNEL);
                if (!rdi->ports) {
                        rvt_pr_err(rdi, "Could not allocate port mem.\n");
                        ret = -ENOMEM;
                        goto bail_mr;
                }
        } else {
                rvt_pr_warn(rdi, "Driver says it has no ports.\n");
        }
        /* We are now good to announce we exist */
        ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
        if (ret) {
@@ -327,6 +334,8 @@ int rvt_register_device(struct rvt_dev_info *rdi)
        rvt_mr_exit(rdi);

bail_no_mr:
        rvt_qp_exit(rdi);

        return ret;
}
EXPORT_SYMBOL(rvt_register_device);
...
@@ -172,7 +172,13 @@ struct rvt_driver_params {
         * For instance special module parameters. Goes here.
         */
        unsigned int lkey_table_size;
        unsigned int qp_table_size;
        int qpn_start;
        int qpn_inc;
        int qpn_res_start;
        int qpn_res_end;
        int nports;
        u8 qos_shift;
};

/* Protection domain */
@@ -205,6 +211,7 @@ struct rvt_driver_provided {
        int (*port_callback)(struct ib_device *, u8, struct kobject *);
        const char * (*get_card_name)(struct rvt_dev_info *rdi);
        struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);
        void (*free_all_qps)(struct rvt_dev_info *rdi);
        /*--------------------*/
        /* Optional functions */
@@ -245,6 +252,8 @@ struct rvt_dev_info {
        int flags;
        struct rvt_ibport **ports;

        struct rvt_qp_ibdev *qp_dev;
};

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
...
@@ -259,4 +259,37 @@ struct rvt_srq {
        u32 limit;
};
/*
 * 2^24 QPNs total; e.g. with 4 KiB pages each bitmap page covers
 * 32768 QPNs (RVT_BITS_PER_PAGE), giving 512 map entries.
 */
#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are not deallocated until the QPN table is freed.
 * This way, large bitmaps are not allocated unless large numbers of
 * QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock; /* protect changes to the qp table */
        unsigned flags;  /* flags for QP0/1 allocated for each port */
        u32 last;        /* last QP number allocated */
        u32 nmaps;       /* size of the map table */
        u16 limit;
        u8 incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};
struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock; /* qptable lock */
        struct rvt_qpn_table qpn_table;
};
#endif          /* DEF_RDMAVT_INCQP_H */