Commit 2c69448b authored by Jan-Bernd Themann, committed by David S. Miller

ehea: DLPAR memory add fix

Due to stability issues under high load, the HW queue handling has to be
changed: the HW queues are now stopped and restarted instead of being
destroyed and reallocated.
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 31a5bb04
drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0074"
+#define DRV_VERSION "EHEA_0077"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD 2
 #define DLPAR_MEM_REM 4
-#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
[Diff collapsed in the original view: the large ehea_main.c hunk carrying the queue stop/restart rework.]
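That collapsed hunk is where the bulk of the fix lives: instead of destroying and reallocating the HW queues around a DLPAR memory add, each QP is parked, the memory regions are re-registered, and the QP is walked back to the ready state. A minimal sketch of that idea follows; ehea_modify_qp_state() and the RESET/INITIALIZED state constants are assumptions for illustration (only RDY2RCV, RDY2SND and ERROR appear in the hunks below).

/* Sketch only: ehea_modify_qp_state() and the RESET/INITIALIZED
 * constants are assumed, not taken from this diff. */
static int ehea_stop_restart_qp_sketch(struct ehea_qp *qp)
{
	/* Park the queue so the HW stops touching its memory. */
	if (ehea_modify_qp_state(qp, H_QP_CR_STATE_RESET))
		return -EIO;

	/* ... rebuild the busmap and re-register kernel MRs here ... */

	/* Walk back up: RESET -> INITIALIZED -> RDY2RCV -> RDY2SND. */
	if (ehea_modify_qp_state(qp, H_QP_CR_STATE_INITIALIZED) ||
	    ehea_modify_qp_state(qp, H_QP_CR_STATE_RDY2RCV) ||
	    ehea_modify_qp_state(qp, H_QP_CR_STATE_RDY2SND))
		return -EIO;

	return 0;
}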
drivers/net/ehea/ehea_phyp.h
@@ -126,6 +126,7 @@ struct hcp_modify_qp_cb0 {
 #define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
 #define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
 #define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
+#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
 
 struct hcp_modify_qp_cb1 {
 	u32 qpn; /* 00 */
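The new H_QP_CR_RES_STATE mask selects the "resultant state" field (bits 32..38) that the hypervisor reports back in the modify-QP control block. A hedged sketch of extracting it; the cb0 field name qp_ctl_reg is an assumption, not shown in this hunk:

/* Sketch: read back the state the QP actually landed in after a
 * modify-QP hcall.  The field name qp_ctl_reg is assumed. */
static u64 ehea_qp_res_state(struct hcp_modify_qp_cb0 *cb0)
{
	return (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) >> 32;
}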
drivers/net/ehea/ehea_qmr.c
@@ -563,8 +563,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 int ehea_create_busmap( void )
 {
 	u64 vaddr = EHEA_BUSMAP_START;
-	unsigned long abs_max_pfn = 0;
-	unsigned long sec_max_pfn;
+	unsigned long high_section_index = 0;
 	int i;
 
 	/*
@@ -574,14 +573,10 @@ int ehea_create_busmap( void )
 	ehea_bmap.valid_sections = 0;
 
 	for (i = 0; i < NR_MEM_SECTIONS; i++)
-		if (valid_section_nr(i)) {
-			sec_max_pfn = section_nr_to_pfn(i);
-			if (sec_max_pfn > abs_max_pfn)
-				abs_max_pfn = sec_max_pfn;
-			ehea_bmap.valid_sections++;
-		}
+		if (valid_section_nr(i))
+			high_section_index = i;
 
-	ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+	ehea_bmap.entries = high_section_index + 1;
 
 	ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
 	if (!ehea_bmap.vaddr)
@@ -593,6 +588,7 @@ int ehea_create_busmap( void )
 		if (pfn_valid(pfn)) {
 			ehea_bmap.vaddr[i] = vaddr;
 			vaddr += EHEA_SECTSIZE;
+			ehea_bmap.valid_sections++;
 		} else
 			ehea_bmap.vaddr[i] = 0;
 	}
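The rewritten loop sizes ehea_bmap.vaddr by the highest valid section index, making it a sparse array indexed directly by kernel memory-section number, and valid_sections is now counted while the entries are filled in rather than up front. A hedged sketch of the lookup this layout implies (function name hypothetical; the driver's real helper is not part of this hunk):

/* Sketch: translate a kernel address to an eHEA bus address by
 * indexing the busmap with the memory section, then OR-ing in the
 * offset inside the 16 MB section. */
static u64 ehea_busmap_lookup_sketch(void *caddr)
{
	unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;

	if (index >= ehea_bmap.entries || !ehea_bmap.vaddr[index])
		return 0;	/* section not registered with the HW */

	return ehea_bmap.vaddr[index] |
	       ((unsigned long)caddr & (EHEA_SECTSIZE - 1));
}

The remaining ehea_qmr.c hunks below adjust ehea_reg_kernel_mr() to match.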
@@ -637,7 +633,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 
 	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-	pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
+	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!pt) {
 		ehea_error("no mem");
 		ret = -ENOMEM;
@@ -660,8 +656,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 		void *sectbase = __va(i << SECTION_SIZE_BITS);
 		unsigned long k = 0;
 
-		for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
-		     j++) {
+		for (j = 0; j < (EHEA_PAGES_PER_SECTION /
+				 EHEA_MAX_RPAGE); j++) {
 			for (m = 0; m < EHEA_MAX_RPAGE; m++) {
 				pg = sectbase + ((k++) * EHEA_PAGESIZE);
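Two sizing details in the ehea_reg_kernel_mr() hunks are worth pinning down; the numbers assume EHEA_MAX_RPAGE is 512, its value elsewhere in ehea_qmr.h (not shown here):

/* Worked numbers, assuming EHEA_MAX_RPAGE == 512:
 *   old pt size: EHEA_MAX_RPAGE * sizeof(u64) = 512 * 8 = 4096 bytes,
 *   so kzalloc(PAGE_SIZE, ...) is identical on 4 KB-page kernels and
 *   merely rounds up to a whole page on larger page sizes;
 *   batches per section: EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE
 *                      = 4096 / 512 = 8 register-rpage batches,
 *   each handing the HW 512 physical page addresses via pt.
 */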
drivers/net/ehea/ehea_qmr.h
@@ -39,7 +39,7 @@
 #define EHEA_PAGESHIFT 12
 #define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
 #define EHEA_SECTSIZE (1UL << 24)
-#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
 
 #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
 #error eHEA module can't work if kernel sectionsize < ehea sectionsize
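The EHEA_PAGES_PER_SECTION fix matters on kernels whose base page size is not 4 KB (64 KB pages are a common ppc64 configuration; reading that as the failing case is an inference from the define, not stated in the patch):

/* Old: EHEA_SECTSIZE >> PAGE_SHIFT
 *   4 KB pages:  (1UL << 24) >> 12 = 4096  (accidentally correct)
 *   64 KB pages: (1UL << 24) >> 16 = 256, and 256 / EHEA_MAX_RPAGE(512)
 *   rounds to 0, so the per-section registration loop above would not
 *   run at all.
 * New: EHEA_SECTSIZE >> EHEA_PAGESHIFT = (1UL << 24) >> 12 = 4096,
 *   independent of the kernel page size, matching the 4 KB pages the
 *   HW is fed in EHEA_PAGESIZE steps.
 */

The second ehea_qmr.h hunk below adjusts the CQE error mask.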
@@ -145,7 +145,7 @@ struct ehea_rwqe {
 #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
 
 #define EHEA_CQE_TYPE_RQ 0x60
-#define EHEA_CQE_STAT_ERR_MASK 0x721F
+#define EHEA_CQE_STAT_ERR_MASK 0x720F
 #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
 #define EHEA_CQE_STAT_ERR_TCP 0x4000
 #define EHEA_CQE_STAT_ERR_IP 0x2000
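The mask change drops bit 0x0010 from the CQE status bits treated as errors (0x721F -> 0x720F), so completions with only that bit set are no longer discarded. A hedged sketch of how such a mask is consumed on the completion path; the status field name is assumed from the CQE layout, which this diff does not show:

/* Sketch: classify a completion.  cqe->status is assumed to be the
 * status word the EHEA_CQE_STAT_* masks apply to. */
static inline int ehea_cqe_is_bad(const struct ehea_cqe *cqe)
{
	return (cqe->status & EHEA_CQE_STAT_ERR_MASK) != 0;
}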