Commit 05eb2389 authored by Steve Wise, committed by David S. Miller

cxgb4/iw_cxgb4: Doorbell Drop Avoidance Bug Fixes

The current logic is slow to disable user DB usage and also fails to avoid
DB FIFO drops under heavy load.  This commit fixes these deficiencies by
notifying the ULDs of potential DB problems more efficiently, and by
implementing a smoother flow control algorithm in iw_cxgb4, the ULD that
puts the most load on the DB FIFO.

Design:

cxgb4:

Direct ULD callback from the DB FULL/DROP interrupt handler.  This allows
the ULD to stop doing user DB writes as quickly as possible.
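
As a rough sketch, the ULD side of this callback just dispatches on the
control code.  The CXGB4_CONTROL_DB_* values come from cxgb4_uld.h, but the
handler names below are illustrative; the real iw_cxgb4 device.c code is
not visible in the collapsed diff:

/* Hedged sketch of the ULD control upcall in iw_cxgb4. */
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct c4iw_dev *dev = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(dev);	/* status page db_off = 1, STOPPED */
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(dev);	/* FLOW_CONTROL: drain db_fc_list */
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(dev);	/* db_state = RECOVERY */
		break;
	default:
		break;
	}
	return 0;
}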

While user DB usage is disabled, the LLD accumulates DB write events for
its queues.  Once DB usage is re-enabled, a single DB write is done for
each queue with its accumulated write count.  This reduces the load put on
the DB FIFO when re-enabling.
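
Condensed from the cxgb4_main.c and sge.c hunks further down (locking
omitted), the accumulate-and-replay pattern is:

/* ring_tx_db(): while the doorbell is disabled, only remember
 * how far pidx advanced.
 */
if (q->db_disabled)
	q->db_pidx_inc += n;
else
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));

/* enable_txq_db(): replay the accumulated count with one DB write. */
if (q->db_pidx_inc) {
	wmb();		/* commit descriptor writes before the DB */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
	q->db_pidx_inc = 0;
}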

iw_cxgb4:

Instead of marking each QP to indicate DB writes are disabled, we create a
device-global status page that each user process maps.  This allows
iw_cxgb4 to set a single bit to disable all DB writes for all user QPs,
rather than traversing the idr of all the active QPs.  If libcxgb4 doesn't
support this, then we fall back to the old approach of marking each QP, so
the new driver still works with an older libcxgb4.
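
On the library side, a status-page-aware libcxgb4 would mmap() the page
advertised via status_page_key/status_page_size in struct
c4iw_alloc_ucontext_resp (added below) and test db_off before each ring.
A minimal sketch, with the kernel-submission helper name assumed:

/* Hypothetical provider-side fast path; "sp" is the mmap()ed
 * struct t4_dev_status_page, of which only db_off is defined.
 */
static void ring_user_db(struct t4_dev_status_page *sp,
			 void __iomem *db, u32 qid, u16 inc)
{
	if (!sp->db_off) {
		/* DBs enabled: ring the user doorbell directly. */
		writel(QID(qid) | PIDX(inc), db);
	} else {
		/* DBs disabled: submit the increment to the kernel via
		 * modify_qp (C4IW_QP_ATTR_SQ_DB/RQ_DB), which parks the
		 * QP on the flow control list.
		 */
		submit_db_to_kernel(qid, inc);	/* assumed helper */
	}
}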

When the LLD upcalls iw_cxgb4 indicating DB FULL, we disable all DB writes
via the status page and transition the DB state to STOPPED.  As user
processes see that DB writes are disabled, they call into iw_cxgb4 to
submit their DB write events.  Since the DB state is STOPPED, the QP
trying to write gets enqueued on a new DB "flow control" list.  As
subsequent DB writes are submitted for a flow-controlled QP, its write
count is accumulated.  So all the user QPs that are actively ringing the
DB get put on this list, each with its accumulated number of requested
writes.
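
Stripped of locking, the enqueue side (ring_kernel_sq_db() and
ring_kernel_rq_db() in the qp.c hunk below) reduces to:

/* Called under rhp->lock and qhp->lock; SQ variant shown. */
if (qhp->rhp->db_state == NORMAL) {
	t4_ring_sq_db(&qhp->wq, inc);	/* ring immediately */
} else {
	/* Park the QP (once) and accumulate its increment. */
	if (list_empty(&qhp->db_fc_entry))
		list_add_tail(&qhp->db_fc_entry, &qhp->rhp->db_fc_list);
	qhp->wq.sq.wq_pidx_inc += inc;
}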

When the LLD upcalls iw_cxgb4 indicating DB EMPTY, which happens in a
workqueue context, we change the DB state to FLOW_CONTROL and begin
resuming all the QPs that are on the flow control list.  This logic runs
until the flow control list is empty or we exit FLOW_CONTROL mode (due to
a DB DROP upcall, for example).  QPs are removed from this list, and their
accumulated DB write counts are written to the DB FIFO.  Sets of QPs,
called chunks in the code, are removed at one time; the chunk size is 64.
So 64 QPs are resumed at a time, and before the next chunk is resumed, the
logic waits (blocks) for the DB FIFO to drain.  This prevents resuming too
quickly and overflowing the FIFO.  Once the flow control list is empty,
the DB state transitions back to NORMAL and user QPs are again allowed to
write directly to the user DB register.
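
The resume loop itself lives in iw_cxgb4's device.c, which the collapsed
diff below does not include.  Its shape is roughly the following sketch;
the chunk constant and the FIFO-drain wait are assumptions:

#define DB_FC_RESUME_SIZE 64	/* the 64-QP chunk noted above */

/* Sketch of the FLOW_CONTROL drain loop, run from the DB EMPTY
 * upcall in workq context, dropping ctx->lock only while blocking
 * for the FIFO to drain.
 */
while (!list_empty(&ctx->db_fc_list)) {
	int i;

	/* Resume one chunk: pop up to 64 QPs and write their
	 * accumulated pidx increments to the DB FIFO.
	 */
	for (i = 0; i < DB_FC_RESUME_SIZE &&
		    !list_empty(&ctx->db_fc_list); i++) {
		struct c4iw_qp *qp = list_first_entry(&ctx->db_fc_list,
				struct c4iw_qp, db_fc_entry);

		list_del_init(&qp->db_fc_entry);
		t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
		qp->wq.sq.wq_pidx_inc = 0;
		t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
		qp->wq.rq.wq_pidx_inc = 0;
	}

	/* Block until the FIFO drains; bail if a DB DROP upcall
	 * moved us out of FLOW_CONTROL in the meantime.
	 */
	wait_for_db_fifo_drain(ctx);	/* assumed helper */
	if (ctx->db_state != FLOW_CONTROL)
		break;
}
if (list_empty(&ctx->db_fc_list)) {
	ctx->db_state = NORMAL;
	ctx->rdev.status_page->db_off = 0;	/* user DBs allowed again */
}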

The algorithm is designed so that if the DB write load is high enough,
all DB writes get submitted by the kernel using this flow-controlled
approach to avoid DB drops.  As the load lightens, we return to normal DB
writes performed directly by user applications.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
enum c4iw_rdev_flags {
T4_FATAL_ERROR = (1<<0),
T4_STATUS_PAGE_DISABLED = (1<<1),
};
struct c4iw_stat {
@@ -130,6 +131,7 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
u64 db_fc_interruptions;
u64 tcam_full;
u64 act_ofld_conn_fails;
u64 pas_ofld_conn_fails;
@@ -150,6 +152,7 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
struct t4_dev_status_page *status_page;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
RECOVERY = 2
RECOVERY = 2,
STOPPED = 3
};
struct c4iw_dev {
@@ -225,10 +229,10 @@ struct c4iw_dev {
struct mutex db_mutex;
struct dentry *debugfs_root;
enum db_state db_state;
int qpcnt;
struct idr hwtid_idr;
struct idr atid_idr;
struct idr stid_idr;
struct list_head db_fc_list;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -432,6 +436,7 @@ struct c4iw_qp_attributes {
struct c4iw_qp {
struct ib_qp ibqp;
struct list_head db_fc_entry;
struct c4iw_dev *rhp;
struct c4iw_ep *ep;
struct c4iw_qp_attributes attr;
@@ -106,15 +106,54 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
static int warned;
struct c4iw_alloc_ucontext_resp uresp;
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return ERR_PTR(-ENOMEM);
if (!context) {
ret = -ENOMEM;
goto err;
}
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
if (udata->outlen < sizeof(uresp)) {
if (!warned++)
pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (!mm)
goto err_free;
uresp.status_page_size = PAGE_SIZE;
spin_lock(&context->mmap_lock);
uresp.status_page_key = context->key;
context->key += PAGE_SIZE;
spin_unlock(&context->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret)
goto err_mm;
mm->key = uresp.status_page_key;
mm->addr = virt_to_phys(rhp->rdev.status_page);
mm->len = PAGE_SIZE;
insert_mmap(context, mm);
}
return &context->ibucontext;
err_mm:
kfree(mm);
err_free:
kfree(context);
err:
return ERR_PTR(ret);
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
wake_up(&(to_c4iw_qp(qp)->wait));
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
if (list_empty(entry))
list_add_tail(entry, head);
}
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
spin_lock_irqsave(&qhp->rhp->lock, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL) {
t4_ring_sq_db(&qhp->wq, inc);
} else {
add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
qhp->wq.sq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&qhp->rhp->lock, flags);
return 0;
}
static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
unsigned long flags;
spin_lock_irqsave(&qhp->rhp->lock, flags);
spin_lock(&qhp->lock);
if (qhp->rhp->db_state == NORMAL) {
t4_ring_rq_db(&qhp->wq, inc);
} else {
add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
qhp->wq.rq.wq_pidx_inc += inc;
}
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&qhp->rhp->lock, flags);
return 0;
}
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -750,9 +790,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
if (t4_wq_db_enabled(&qhp->wq))
if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_sq_db(&qhp->wq, idx);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
spin_unlock_irqrestore(&qhp->lock, flag);
ring_kernel_sq_db(qhp, idx);
}
return err;
}
@@ -812,9 +856,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wr = wr->next;
num_wrs--;
}
if (t4_wq_db_enabled(&qhp->wq))
if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_rq_db(&qhp->wq, idx);
spin_unlock_irqrestore(&qhp->lock, flag);
} else {
spin_unlock_irqrestore(&qhp->lock, flag);
ring_kernel_rq_db(qhp, idx);
}
return err;
}
@@ -1200,35 +1248,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
return ret;
}
/*
* Called by the library when the qp has user dbs disabled due to
* a DB_FULL condition. This function will single-thread all user
* DB rings to avoid overflowing the hw db-fifo.
*/
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
int delay = db_delay_usecs;
mutex_lock(&qhp->rhp->db_mutex);
do {
/*
* The interrupt threshold is dbfifo_int_thresh << 6. So
* make sure we don't cross that and generate an interrupt.
*/
if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
(qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
writel(QID(qid) | PIDX(inc), qhp->wq.db);
break;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(delay));
delay = min(delay << 1, 2000);
} while (1);
mutex_unlock(&qhp->rhp->db_mutex);
return 0;
}
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
enum c4iw_qp_attr_mask mask,
struct c4iw_qp_attributes *attrs,
@@ -1278,11 +1297,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
if (mask & C4IW_QP_ATTR_SQ_DB) {
ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
goto out;
}
if (mask & C4IW_QP_ATTR_RQ_DB) {
ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
goto out;
}
@@ -1465,14 +1484,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
return ret;
}
static int enable_qp_db(int id, void *p, void *data)
{
struct c4iw_qp *qp = p;
t4_enable_wq_db(&qp->wq);
return 0;
}
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
struct c4iw_dev *rhp;
@@ -1490,22 +1501,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
spin_lock_irq(&rhp->lock);
remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
rhp->qpcnt--;
BUG_ON(rhp->qpcnt < 0);
if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
rhp->rdev.stats.db_state_transitions++;
rhp->db_state = NORMAL;
idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
}
if (db_coalescing_threshold >= 0)
if (rhp->qpcnt <= db_coalescing_threshold)
cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
spin_unlock_irq(&rhp->lock);
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
spin_lock_irq(&rhp->lock);
if (!list_empty(&qhp->db_fc_entry))
list_del_init(&qhp->db_fc_entry);
spin_unlock_irq(&rhp->lock);
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1520,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
struct c4iw_qp *qp = p;
t4_disable_wq_db(&qp->wq);
return 0;
}
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
@@ -1610,20 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
spin_lock_irq(&rhp->lock);
if (rhp->db_state != NORMAL)
t4_disable_wq_db(&qhp->wq);
rhp->qpcnt++;
if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
rhp->rdev.stats.db_state_transitions++;
rhp->db_state = FLOW_CONTROL;
idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
}
if (db_coalescing_threshold >= 0)
if (rhp->qpcnt > db_coalescing_threshold)
cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
spin_unlock_irq(&rhp->lock);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
goto err2;
@@ -1709,6 +1692,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
INIT_LIST_HEAD(&qhp->db_fc_entry);
PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
@@ -300,6 +300,7 @@ struct t4_sq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
u16 wq_pidx_inc;
u16 flags;
short flush_cidx;
};
@@ -324,6 +325,7 @@ struct t4_rq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
u16 wq_pidx_inc;
};
struct t4_wq {
@@ -609,3 +611,7 @@ static inline void t4_set_cq_in_error(struct t4_cq *cq)
((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif
struct t4_dev_status_page {
u8 db_off;
};
@@ -70,4 +70,9 @@ struct c4iw_create_qp_resp {
__u32 qid_mask;
__u32 flags;
};
struct c4iw_alloc_ucontext_resp {
__u64 status_page_key;
__u32 status_page_size;
};
#endif
@@ -500,6 +500,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
unsigned short db_pidx_inc;
u64 udb;
};
@@ -3578,14 +3578,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
static void disable_txq_db(struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
unsigned long flags;
spin_lock_irqsave(&q->db_lock, flags);
q->db_disabled = 1;
spin_unlock_irq(&q->db_lock);
spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct sge_txq *q)
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
if (q->db_pidx_inc) {
/* Make sure that all writes to the TX descriptors
* are committed before we tell HW about them.
*/
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
q->db_pidx_inc = 0;
}
q->db_disabled = 0;
spin_unlock_irq(&q->db_lock);
}
@@ -3607,11 +3618,32 @@ static void enable_dbs(struct adapter *adap)
int i;
for_each_ethrxq(&adap->sge, i)
enable_txq_db(&adap->sge.ethtxq[i].q);
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
enable_txq_db(&adap->sge.ofldtxq[i].q);
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(&adap->sge.ctrlq[i].q);
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
if (adap->uld_handle[CXGB4_ULD_RDMA])
ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
cmd);
}
static void process_db_full(struct work_struct *work)
{
struct adapter *adap;
adap = container_of(work, struct adapter, db_full_task);
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT,
DBFIFO_HP_INT | DBFIFO_LP_INT);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3619,7 +3651,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
u16 hw_pidx, hw_cidx;
int ret;
spin_lock_bh(&q->db_lock);
spin_lock_irq(&q->db_lock);
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
if (ret)
goto out;
@@ -3636,7 +3668,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
}
out:
q->db_disabled = 0;
spin_unlock_bh(&q->db_lock);
q->db_pidx_inc = 0;
spin_unlock_irq(&q->db_lock);
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
@@ -3652,29 +3685,6 @@ static void recover_all_queues(struct adapter *adap)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
mutex_lock(&uld_mutex);
if (adap->uld_handle[CXGB4_ULD_RDMA])
ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
cmd);
mutex_unlock(&uld_mutex);
}
static void process_db_full(struct work_struct *work)
{
struct adapter *adap;
adap = container_of(work, struct adapter, db_full_task);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
drain_db_fifo(adap, dbfifo_drain_delay);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT,
DBFIFO_HP_INT | DBFIFO_LP_INT);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}
static void process_db_drop(struct work_struct *work)
{
struct adapter *adap;
@@ -3682,11 +3692,13 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
if (is_t4(adap->params.chip)) {
disable_dbs(adap);
drain_db_fifo(adap, dbfifo_drain_delay);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
drain_db_fifo(adap, 1);
drain_db_fifo(adap, dbfifo_drain_delay);
recover_all_queues(adap);
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
} else {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3727,6 +3739,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3735,7 +3749,10 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
if (is_t4(adap->params.chip))
if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
}
queue_work(workq, &adap->db_drop_task);
}
@@ -860,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
unsigned int *wr, index;
unsigned long flags;
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled) {
if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -878,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
writel(n, adap->bar2 + q->udb + 8);
wmb();
}
}
} else
q->db_pidx_inc += n;
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
spin_unlock_irqrestore(&q->db_lock, flags);
}
/**