Commit 7adf7d1b authored by Jon Mason, committed by David S. Miller

vxge: Fix multicast issues

Fix an error in the multicast flag check, add calls to restore the
multicast and promiscuous mode settings after change_mtu, and clean up
style by using a temporary variable to shorten the function calls.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
Signed-off-by: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d03848e0
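
For illustration only, not part of the patch: a minimal stand-alone C sketch of the flag-check bug this commit fixes. The IFF_ALLMULTI value matches <linux/if.h>; everything else (the action() helper and its arguments) is a simplified, hypothetical stand-in for the driver state, not driver code.

/* Hypothetical sketch: models only the IFF_ALLMULTI flag check, nothing else. */
#include <stdio.h>
#include <stdbool.h>

#define IFF_ALLMULTI 0x200		/* same value as in <linux/if.h> */

static const char *action(unsigned int dev_flags, bool all_multi_flg, bool fixed)
{
	/* enable branch is the same before and after the patch */
	if ((dev_flags & IFF_ALLMULTI) && !all_multi_flg)
		return "enable multicast";

	/* old: re-tested IFF_ALLMULTI being set; fixed: tests it being cleared */
	if (fixed ? (!(dev_flags & IFF_ALLMULTI) && all_multi_flg)
		  : ((dev_flags & IFF_ALLMULTI) && all_multi_flg))
		return "disable multicast";

	return "no change";
}

int main(void)
{
	/* user clears IFF_ALLMULTI while all-multi is currently on */
	printf("old, flag cleared: %s\n", action(0, true, false));	/* "no change" - bug */
	printf("new, flag cleared: %s\n", action(0, true, true));	/* "disable multicast" */

	/* IFF_ALLMULTI still set: the old check disabled multicast anyway */
	printf("old, flag set: %s\n", action(IFF_ALLMULTI, true, false));	/* "disable multicast" - bug */
	printf("new, flag set: %s\n", action(IFF_ALLMULTI, true, true));	/* "no change" */
	return 0;
}

The sketch omits a second part of the fix visible in the diff below: the disable loop previously left all_multi_flg set to 1, whereas the patched code resets it to 0.
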
@@ -1088,7 +1088,8 @@ static void vxge_set_multicast(struct net_device *dev)
 	struct netdev_hw_addr *ha;
 	struct vxgedev *vdev;
 	int i, mcast_cnt = 0;
 	struct __vxge_hw_device *hldev;
+	struct vxge_vpath *vpath;
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct macInfo mac_info;
 	int vpath_idx = 0;
@@ -1108,46 +1109,48 @@ static void vxge_set_multicast(struct net_device *dev)
 	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_enable(
-						vdev->vpaths[i].handle);
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to enable "
+						"multicast, status %d", status);
 			vdev->all_multi_flg = 1;
 		}
-	} else if ((dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
+	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_disable(
-						vdev->vpaths[i].handle);
-			vdev->all_multi_flg = 1;
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+			status = vxge_hw_vpath_mcast_disable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
+			vdev->all_multi_flg = 0;
 		}
 	}
-	if (status != VXGE_HW_OK)
-		vxge_debug_init(VXGE_ERR,
-			"failed to %s multicast, status %d",
-			dev->flags & IFF_ALLMULTI ?
-			"enable" : "disable", status);
 
 	if (!vdev->config.addr_learn_en) {
-		if (dev->flags & IFF_PROMISC) {
-			for (i = 0; i < vdev->no_of_vpath; i++) {
-				vxge_assert(vdev->vpaths[i].is_open);
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+
+			if (dev->flags & IFF_PROMISC)
 				status = vxge_hw_vpath_promisc_enable(
-						vdev->vpaths[i].handle);
-			}
-		} else {
-			for (i = 0; i < vdev->no_of_vpath; i++) {
-				vxge_assert(vdev->vpaths[i].is_open);
+					vpath->handle);
+			else
 				status = vxge_hw_vpath_promisc_disable(
-						vdev->vpaths[i].handle);
-			}
+					vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
+					", status %d", dev->flags&IFF_PROMISC ?
+					"enable" : "disable", status);
 		}
 	}
 
 	memset(&mac_info, 0, sizeof(struct macInfo));
 
 	/* Update individual M_CAST address list */
 	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
 		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
 		list_head = &vdev->vpaths[0].mac_addr_list;
 		if ((netdev_mc_count(dev) +
@@ -1157,14 +1160,7 @@ static void vxge_set_multicast(struct net_device *dev)
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
-			if (!list_empty(list_head))
-				mac_entry = (struct vxge_mac_addrs *)
-					list_first_entry(list_head,
-						struct vxge_mac_addrs,
-						item);
-
 			list_for_each_safe(entry, next, list_head) {
-
 				mac_entry = (struct vxge_mac_addrs *) entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
@@ -1207,9 +1203,7 @@ static void vxge_set_multicast(struct net_device *dev)
 		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
-
 			list_for_each_safe(entry, next, list_head) {
-
 				mac_entry = (struct vxge_mac_addrs *) entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
@@ -1229,9 +1223,10 @@ static void vxge_set_multicast(struct net_device *dev)
 		/* Enable all multicast */
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_enable(
-						vdev->vpaths[i].handle);
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
 			if (status != VXGE_HW_OK) {
 				vxge_debug_init(VXGE_ERR,
 					"%s:%d Enabling all multicasts failed",
@@ -1392,6 +1387,7 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
 	int ret = 0;
 
 	/* check if device is down already */
@@ -1402,12 +1398,10 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
 		return 0;
 
-	if (vdev->vpaths[vp_id].handle) {
-		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
-				== VXGE_HW_OK) {
+	if (vpath->handle) {
+		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
 			if (is_vxge_card_up(vdev) &&
-				vxge_hw_vpath_recover_from_reset(
-					vdev->vpaths[vp_id].handle)
+				vxge_hw_vpath_recover_from_reset(vpath->handle)
 				!= VXGE_HW_OK) {
 				vxge_debug_init(VXGE_ERR,
 					"vxge_hw_vpath_recover_from_reset"
@@ -1423,11 +1417,20 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	} else
 		return VXGE_HW_FAIL;
 
-	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
-	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
+	vxge_restore_vpath_mac_addr(vpath);
+	vxge_restore_vpath_vid_table(vpath);
 
 	/* Enable all broadcast */
-	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);
+	vxge_hw_vpath_bcast_enable(vpath->handle);
+
+	/* Enable all multicast */
+	if (vdev->all_multi_flg) {
+		status = vxge_hw_vpath_mcast_enable(vpath->handle);
+		if (status != VXGE_HW_OK)
+			vxge_debug_init(VXGE_ERR,
+				"%s:%d Enabling multicast failed",
+				__func__, __LINE__);
+	}
 
 	/* Enable the interrupts */
 	vxge_vpath_intr_enable(vdev, vp_id);
@@ -1435,17 +1438,17 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	smp_wmb();
 
 	/* Enable the flow of traffic through the vpath */
-	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);
+	vxge_hw_vpath_enable(vpath->handle);
 
 	smp_wmb();
-	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
-	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;
+	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
+	vpath->ring.last_status = VXGE_HW_OK;
 
 	/* Vpath reset done */
 	clear_bit(vp_id, &vdev->vp_reset);
 
 	/* Start the vpath queue */
-	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo);
+	vxge_wake_tx_queue(&vpath->fifo);
 
 	return ret;
 }
@@ -1479,9 +1482,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 			vxge_debug_init(VXGE_ERR,
 				"%s: execution mode is debug, returning..",
 				vdev->ndev->name);
-		clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-		netif_tx_stop_all_queues(vdev->ndev);
-		return 0;
+			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+			netif_tx_stop_all_queues(vdev->ndev);
+			return 0;
 		}
 	}
@@ -1628,8 +1631,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
  */
 int vxge_reset(struct vxgedev *vdev)
 {
-	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-	return 0;
+	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
 
 /**
@@ -1992,17 +1994,17 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 /* reset vpaths */
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
-	int i;
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	int i;
 
-	for (i = 0; i < vdev->no_of_vpath; i++)
-		if (vdev->vpaths[i].handle) {
-			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
-					== VXGE_HW_OK) {
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vpath = &vdev->vpaths[i];
+		if (vpath->handle) {
+			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
 				if (is_vxge_card_up(vdev) &&
 					vxge_hw_vpath_recover_from_reset(
-						vdev->vpaths[i].handle)
-						!= VXGE_HW_OK) {
+						vpath->handle) != VXGE_HW_OK) {
 					vxge_debug_init(VXGE_ERR,
 						"vxge_hw_vpath_recover_"
 						"from_reset failed for vpath: "
@@ -2016,83 +2018,87 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 				return status;
 			}
 		}
+	}
 
 	return status;
 }
 
 /* close vpaths */
 void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
+	struct vxge_vpath *vpath;
 	int i;
 
 	for (i = index; i < vdev->no_of_vpath; i++) {
-		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
-			vxge_hw_vpath_close(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+
+		if (vpath->handle && vpath->is_open) {
+			vxge_hw_vpath_close(vpath->handle);
 			vdev->stats.vpaths_open--;
 		}
-		vdev->vpaths[i].is_open = 0;
-		vdev->vpaths[i].handle = NULL;
+		vpath->is_open = 0;
+		vpath->handle = NULL;
 	}
 }
 
 /* open vpaths */
 int vxge_open_vpaths(struct vxgedev *vdev)
 {
+	struct vxge_hw_vpath_attr attr;
 	enum vxge_hw_status status;
-	int i;
+	struct vxge_vpath *vpath;
 	u32 vp_id = 0;
-	struct vxge_hw_vpath_attr attr;
+	int i;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_assert(vdev->vpaths[i].is_configured);
-		attr.vp_id = vdev->vpaths[i].device_id;
+		vpath = &vdev->vpaths[i];
+
+		vxge_assert(vpath->is_configured);
+		attr.vp_id = vpath->device_id;
 		attr.fifo_attr.callback = vxge_xmit_compl;
 		attr.fifo_attr.txdl_term = vxge_tx_term;
 		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
-		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;
+		attr.fifo_attr.userdata = &vpath->fifo;
 		attr.ring_attr.callback = vxge_rx_1b_compl;
 		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
 		attr.ring_attr.rxd_term = vxge_rx_term;
 		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
-		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;
+		attr.ring_attr.userdata = &vpath->ring;
-		vdev->vpaths[i].ring.ndev = vdev->ndev;
-		vdev->vpaths[i].ring.pdev = vdev->pdev;
-		status = vxge_hw_vpath_open(vdev->devh, &attr,
-				&(vdev->vpaths[i].handle));
+		vpath->ring.ndev = vdev->ndev;
+		vpath->ring.pdev = vdev->pdev;
+		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
 		if (status == VXGE_HW_OK) {
-			vdev->vpaths[i].fifo.handle =
+			vpath->fifo.handle =
 			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
-			vdev->vpaths[i].ring.handle =
+			vpath->ring.handle =
 			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
-			vdev->vpaths[i].fifo.tx_steering_type =
+			vpath->fifo.tx_steering_type =
 				vdev->config.tx_steering_type;
-			vdev->vpaths[i].fifo.ndev = vdev->ndev;
-			vdev->vpaths[i].fifo.pdev = vdev->pdev;
-			vdev->vpaths[i].fifo.indicate_max_pkts =
+			vpath->fifo.ndev = vdev->ndev;
+			vpath->fifo.pdev = vdev->pdev;
+			vpath->fifo.indicate_max_pkts =
 				vdev->config.fifo_indicate_max_pkts;
-			vdev->vpaths[i].ring.rx_vector_no = 0;
-			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
-			vdev->vpaths[i].is_open = 1;
-			vdev->vp_handles[i] = vdev->vpaths[i].handle;
-			vdev->vpaths[i].ring.gro_enable =
-				vdev->config.gro_enable;
-			vdev->vpaths[i].ring.vlan_tag_strip =
-				vdev->vlan_tag_strip;
+			vpath->ring.rx_vector_no = 0;
+			vpath->ring.rx_csum = vdev->rx_csum;
+			vpath->is_open = 1;
+			vdev->vp_handles[i] = vpath->handle;
+			vpath->ring.gro_enable = vdev->config.gro_enable;
+			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
 			vdev->stats.vpaths_open++;
 		} else {
 			vdev->stats.vpath_open_fail++;
 			vxge_debug_init(VXGE_ERR,
 				"%s: vpath: %d failed to open "
 				"with status: %d",
-				vdev->ndev->name, vdev->vpaths[i].device_id,
+				vdev->ndev->name, vpath->device_id,
 				status);
 			vxge_close_vpaths(vdev, 0);
 			return -EPERM;
 		}
 
-		vp_id =
-		  ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
-		  vpath->vp_id;
+		vp_id = vpath->handle->vpath->vp_id;
 		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
 	}
 	return VXGE_HW_OK;
@@ -2266,7 +2272,6 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
 		vdev->vxge_entries[j].in_use = 0;
 
 	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
-
 	if (ret > 0) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: MSI-X enable failed for %d vectors, ret: %d",
@@ -2312,17 +2317,16 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 	ret = vxge_alloc_msix(vdev);
 	if (!ret) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
+			struct vxge_vpath *vpath = &vdev->vpaths[i];
 
-			/* If fifo or ring are not enabled
-			   the MSIX vector for that should be set to 0
-			   Hence initializeing this array to all 0s.
-			*/
-			vdev->vpaths[i].ring.rx_vector_no =
-				(vdev->vpaths[i].device_id *
-					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+			/* If fifo or ring are not enabled, the MSIX vector for
+			 * it should be set to 0.
+			 */
+			vpath->ring.rx_vector_no = (vpath->device_id *
+				VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
-			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
-					tim_msix_id, VXGE_ALARM_MSIX_ID);
+			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
+					VXGE_ALARM_MSIX_ID);
 		}
 	}
@@ -2537,9 +2541,10 @@ static void vxge_poll_vp_reset(unsigned long data)
 static void vxge_poll_vp_lockup(unsigned long data)
 {
 	struct vxgedev *vdev = (struct vxgedev *)data;
-	int i;
-	struct vxge_ring *ring;
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	struct vxge_ring *ring;
+	int i;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
@@ -2553,13 +2558,13 @@ static void vxge_poll_vp_lockup(unsigned long data)
 			/* schedule vpath reset */
 			if (!test_and_set_bit(i, &vdev->vp_reset)) {
+				vpath = &vdev->vpaths[i];
+
 				/* disable interrupts for this vpath */
 				vxge_vpath_intr_disable(vdev, i);
 
 				/* stop the queue for this vpath */
-				vxge_stop_tx_queue(&vdev->vpaths[i].
-							fifo);
+				vxge_stop_tx_queue(&vpath->fifo);
 				continue;
 			}
 		}
@@ -2588,6 +2593,7 @@ vxge_open(struct net_device *dev)
 	enum vxge_hw_status status;
 	struct vxgedev *vdev;
 	struct __vxge_hw_device *hldev;
+	struct vxge_vpath *vpath;
 	int ret = 0;
 	int i;
 	u64 val64, function_mode;
@@ -2626,15 +2632,17 @@ vxge_open(struct net_device *dev)
 		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
 			vdev->config.napi_weight);
 		napi_enable(&vdev->napi);
-		for (i = 0; i < vdev->no_of_vpath; i++)
-			vdev->vpaths[i].ring.napi_p = &vdev->napi;
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vpath->ring.napi_p = &vdev->napi;
+		}
 	} else {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
+			vpath = &vdev->vpaths[i];
+			netif_napi_add(dev, &vpath->ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
-			napi_enable(&vdev->vpaths[i].ring.napi);
-			vdev->vpaths[i].ring.napi_p =
-				&vdev->vpaths[i].ring.napi;
+			napi_enable(&vpath->ring.napi);
+			vpath->ring.napi_p = &vpath->ring.napi;
 		}
 	}
@@ -2651,9 +2659,10 @@ vxge_open(struct net_device *dev)
 	}
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vpath = &vdev->vpaths[i];
+
 		/* set initial mtu before enabling the device */
-		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
-						vdev->mtu);
+		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
 		if (status != VXGE_HW_OK) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: fatal: can not set new MTU", dev->name);
@@ -2667,10 +2676,21 @@ vxge_open(struct net_device *dev)
 		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
 	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
 
-	/* Reprogram the DA table with populated mac addresses */
-	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
-		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
+	/* Restore the DA, VID table and also multicast and promiscuous mode
+	 * states
+	 */
+	if (vdev->all_multi_flg) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vxge_restore_vpath_mac_addr(vpath);
+			vxge_restore_vpath_vid_table(vpath);
+
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR,
+					"%s:%d Enabling multicast failed",
+					__func__, __LINE__);
+		}
 	}
 
 	/* Enable vpath to sniff all unicast/multicast traffic that not
@@ -2699,14 +2719,14 @@ vxge_open(struct net_device *dev)
 	/* Enabling Bcast and mcast for all vpath */
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+		status = vxge_hw_vpath_bcast_enable(vpath->handle);
 		if (status != VXGE_HW_OK)
 			vxge_debug_init(VXGE_ERR,
 				"%s : Can not enable bcast for vpath "
 				"id %d", dev->name, i);
 		if (vdev->config.addr_learn_en) {
-			status =
-			vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
 			if (status != VXGE_HW_OK)
 				vxge_debug_init(VXGE_ERR,
 					"%s : Can not enable mcast for vpath "
@@ -2741,9 +2761,11 @@ vxge_open(struct net_device *dev)
 	smp_wmb();
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+
+		vxge_hw_vpath_enable(vpath->handle);
 		smp_wmb();
-		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
+		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
 	}
 
 	netif_tx_start_all_queues(vdev->ndev);