Commit 44fb3126 authored by Thomas Klein, committed by Jeff Garzik

ehea: Fix DLPAR memory add support

This patch fixes two weaknesses in send/receive packet handling which may
lead to kernel panics during DLPAR memory add operations.
Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 5a81f143
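
In short, the diff below does two things: ehea_refill_rq1() now records the refill position only when it was actually asked to post WQEs, and a new helper, ehea_flush_sq(), drains each send queue (bounded to roughly 20 x 5 ms) before the queue pairs are stopped for memory-region re-registration. What follows is an editorial, userspace-only sketch of that bounded drain loop, not part of the patch: swqe_avail, swqe_max and fake_completion_arrives() are illustrative stand-ins for the driver's atomic counter, its computed threshold (sq_skba_size - 2 - swqe_ll_count) and the adapter's real send completions, and usleep() stands in for the kernel's msleep().

/* Editorial sketch of the bounded drain loop added as ehea_flush_sq();
 * the counters and fake_completion_arrives() are made up for illustration. */
#include <stdio.h>
#include <unistd.h>

static int swqe_avail = 10;	/* send WQEs currently free (illustrative) */
static int swqe_max = 14;	/* threshold: sq_skba_size - 2 - swqe_ll_count */

static void fake_completion_arrives(void)
{
	swqe_avail++;		/* pretend the adapter retired one send WQE */
}

int main(void)
{
	int k = 0;

	/* Wait for outstanding send WQEs, but never longer than 20 * 5 ms. */
	while (swqe_avail < swqe_max) {
		usleep(5000);
		fake_completion_arrives();
		if (++k == 20)
			break;
	}
	printf("drained after %d iterations, swqe_avail=%d\n", k, swqe_avail);
	return 0;
}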
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0089"
+#define DRV_VERSION	"EHEA_0090"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -371,6 +371,7 @@ struct ehea_port_res {
 	struct ehea_q_skb_arr rq2_skba;
 	struct ehea_q_skb_arr rq3_skba;
 	struct ehea_q_skb_arr sq_skba;
+	int sq_skba_size;
 	spinlock_t netif_queue;
 	int queue_stopped;
 	int swqe_refill_th;
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -349,7 +349,8 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
 	pr->rq1_skba.os_skbs = 0;
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-		pr->rq1_skba.index = index;
+		if (nr_of_wqes > 0)
+			pr->rq1_skba.index = index;
 		pr->rq1_skba.os_skbs = fill_wqes;
 		return;
 	}
@@ -1464,7 +1465,9 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 		  init_attr->act_nr_rwqes_rq2,
 		  init_attr->act_nr_rwqes_rq3);
 
-	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
 	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
 	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
 	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
@@ -2621,6 +2624,22 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
 	}
 }
 
+void ehea_flush_sq(struct ehea_port *port)
+{
+	int i;
+
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+		struct ehea_port_res *pr = &port->port_res[i];
+		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+		int k = 0;
+		while (atomic_read(&pr->swqe_avail) < swqe_max) {
+			msleep(5);
+			if (++k == 20)
+				break;
+		}
+	}
+}
+
 int ehea_stop_qps(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
@@ -2845,6 +2864,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 		if (dev->flags & IFF_UP) {
 			down(&port->port_lock);
 			netif_stop_queue(dev);
+			ehea_flush_sq(port);
 			ret = ehea_stop_qps(dev);
 			if (ret) {
 				up(&port->port_lock);