Commit 8676ea8f authored by David S. Miller

Merge branch 'aquantia-next'

Pavel Belous says:

====================
net: ethernet: aquantia: improvements and fixes

The following patchset contains improvements and fixes for the aQuantia
AQtion Ethernet driver in the net-next tree.

Most of the fixes are based on review comments from Lino Sanfilippo.

Sanity testing was performed on real hardware. No regressions were found.

v1->v2: 1) Removed buffer copying.
	2) Fixed DMA error handling.

v2->v3: 1) Fixes for aq_ndev_change_mtu:
	   - Use core MTU checking for min_mtu.
	   - Removed extra new_mtu assignment.
	2) Reverse XMAS tree ordering in aq_ring_rx_fill.

v3->v4: 1) Use ndev->reg_state instead of the "is_ndev_registered" flag.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 45ee2440 e399553d
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -87,33 +87,17 @@ static int aq_ndev_close(struct net_device *ndev)
 static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
-	int err = 0;
 
-	err = aq_nic_xmit(aq_nic, skb);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	return err;
+	return aq_nic_xmit(aq_nic, skb);
 }
 
 static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
-	int err = 0;
+	int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
 
-	if (new_mtu == ndev->mtu) {
-		err = 0;
-		goto err_exit;
-	}
-	if (new_mtu < 68) {
-		err = -EINVAL;
-		goto err_exit;
-	}
-	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
 	if (err < 0)
 		goto err_exit;
-	ndev->mtu = new_mtu;
 
 	if (netif_running(ndev)) {
 		aq_ndev_close(ndev);
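Note: the range checks deleted here are not lost. Since Linux 4.10 the core
validates MTU changes against ndev->min_mtu/max_mtu before ever calling the
driver's ndo_change_mtu(); with min_mtu set in aq_nic_alloc_cold() (see the
hunk below), the open-coded "< 68" test is redundant. A paraphrased sketch of
the core check (the real code in net/core/dev.c differs slightly):

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu == dev->mtu)
		return 0;
	if (new_mtu < 0 || new_mtu < dev->min_mtu)	/* replaces "< 68" */
		return -EINVAL;
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;
	/* ...only then is the driver's ndo_change_mtu() invoked... */
	return dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
}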
@@ -252,22 +236,4 @@ static struct pci_driver aq_pci_ops = {
 	.resume = aq_pci_resume,
 };
 
-static int __init aq_module_init(void)
-{
-	int err = 0;
-
-	err = pci_register_driver(&aq_pci_ops);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	return err;
-}
-
-static void __exit aq_module_exit(void)
-{
-	pci_unregister_driver(&aq_pci_ops);
-}
-
-module_init(aq_module_init);
-module_exit(aq_module_exit);
+module_pci_driver(aq_pci_ops);
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -122,14 +122,11 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	struct aq_nic_s *self = (struct aq_nic_s *)param;
 	struct net_device *ndev = aq_nic_get_ndev(self);
 	int err = 0;
-	bool is_busy = false;
 	unsigned int i = 0U;
 	struct aq_hw_link_status_s link_status;
 	struct aq_ring_stats_rx_s stats_rx;
 	struct aq_ring_stats_tx_s stats_tx;
 
-	atomic_inc(&self->header.busy_count);
-	is_busy = true;
-
 	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
@@ -170,8 +167,6 @@ static void aq_nic_service_timer_cb(unsigned long param)
 	ndev->stats.tx_errors = stats_tx.errors;
 
 err_exit:
-	if (is_busy)
-		atomic_dec(&self->header.busy_count);
 	mod_timer(&self->service_timer,
 		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
 }
@@ -207,18 +202,20 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 	int err = 0;
 
 	ndev = aq_nic_ndev_alloc();
-	self = netdev_priv(ndev);
-	if (!self) {
-		err = -EINVAL;
+	if (!ndev) {
+		err = -ENOMEM;
 		goto err_exit;
 	}
 
+	self = netdev_priv(ndev);
+
 	ndev->netdev_ops = ndev_ops;
 	ndev->ethtool_ops = et_ops;
 	SET_NETDEV_DEV(ndev, dev);
 	ndev->if_port = port;
+	ndev->min_mtu = ETH_MIN_MTU;
 	self->ndev = ndev;
 
 	self->aq_pci_func = aq_pci_func;
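The reordered check also fixes a latent bug: netdev_priv() is plain pointer
arithmetic on ndev and never returns NULL, so the old "if (!self)" could not
detect a failed allocation. A minimal sketch of the corrected pattern, with
alloc_etherdev() standing in for aq_nic_ndev_alloc():

static struct aq_nic_s *hypothetical_nic_alloc(void)
{
	struct net_device *ndev;

	ndev = alloc_etherdev(sizeof(struct aq_nic_s));	/* stand-in */
	if (!ndev)
		return NULL;	/* allocation failure: caller maps to -ENOMEM */

	/* netdev_priv() is offset math and never NULL, which is why
	 * testing it (as the old code did) could not catch the failure. */
	return netdev_priv(ndev);
}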
@@ -264,16 +261,16 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
 	}
 #endif
-	err = register_netdev(self->ndev);
-	if (err < 0)
-		goto err_exit;
-
-	self->is_ndev_registered = true;
 	netif_carrier_off(self->ndev);
 
 	for (i = AQ_CFG_VECS_MAX; i--;)
 		aq_nic_ndev_queue_stop(self, i);
 
+	err = register_netdev(self->ndev);
+	if (err < 0)
+		goto err_exit;
+
 err_exit:
 	return err;
 }
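Moving register_netdev() after the carrier/queue setup follows the usual probe
rule: the stack may invoke the driver's ndo callbacks as soon as registration
returns, so the device must already be in a coherent state. In outline
(a sketch, not the full function):

	netif_carrier_off(ndev);	/* no link reported yet */
	/* ... stop all queues, finish ring setup ... */
	err = register_netdev(ndev);	/* device becomes visible here */
	if (err < 0)
		goto err_exit;		/* nothing to unwind: never registered */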
@@ -296,7 +293,7 @@ void aq_nic_ndev_free(struct aq_nic_s *self)
 	if (!self->ndev)
 		goto err_exit;
 
-	if (self->is_ndev_registered)
+	if (self->ndev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(self->ndev);
 
 	if (self->aq_hw)
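Using reg_state drops driver-private bookkeeping: the core already records the
netdev lifecycle (NETREG_UNINITIALIZED -> NETREG_REGISTERED ->
NETREG_UNREGISTERED), so teardown can query it directly. A minimal sketch with
a hypothetical free helper:

static void hypothetical_ndev_free(struct net_device *ndev)
{
	/* reg_state is maintained by register_netdev()/unregister_netdev() */
	if (ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ndev);
	free_netdev(ndev);
}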
@@ -471,95 +468,116 @@ int aq_nic_start(struct aq_nic_s *self)
 	return err;
 }
 
-static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
-					struct sk_buff *skb,
-					struct aq_ring_buff_s *dx)
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+				   struct sk_buff *skb,
+				   struct aq_ring_s *ring)
 {
 	unsigned int ret = 0U;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int frag_count = 0U;
+	unsigned int dx = ring->sw_tail;
+	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
 
-	dx->flags = 0U;
-	dx->len = skb_headlen(skb);
-	dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
-				DMA_TO_DEVICE);
-	dx->len_pkt = skb->len;
-	dx->is_sop = 1U;
-	dx->is_mapped = 1U;
+	if (unlikely(skb_is_gso(skb))) {
+		dx_buff->flags = 0U;
+		dx_buff->len_pkt = skb->len;
+		dx_buff->len_l2 = ETH_HLEN;
+		dx_buff->len_l3 = ip_hdrlen(skb);
+		dx_buff->len_l4 = tcp_hdrlen(skb);
+		dx_buff->mss = skb_shinfo(skb)->gso_size;
+		dx_buff->is_txc = 1U;
 
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
+		++ret;
+	}
+
+	dx_buff->flags = 0U;
+	dx_buff->len = skb_headlen(skb);
+	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+				     skb->data,
+				     dx_buff->len,
+				     DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+		goto exit;
+
+	dx_buff->len_pkt = skb->len;
+	dx_buff->is_sop = 1U;
+	dx_buff->is_mapped = 1U;
 	++ret;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
-		dx->is_tcp_cso =
+		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
+			1U : 0U;
+		dx_buff->is_tcp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-		dx->is_udp_cso =
+		dx_buff->is_udp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
 	}
 
 	for (; nr_frags--; ++frag_count) {
-		unsigned int frag_len;
+		unsigned int frag_len = 0U;
 		dma_addr_t frag_pa;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
 
 		frag_len = skb_frag_size(frag);
 		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
 					   frag_len, DMA_TO_DEVICE);
 
+		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
+			goto mapping_error;
+
 		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
-			++dx;
-			++ret;
-			dx->flags = 0U;
-			dx->len = AQ_CFG_TX_FRAME_MAX;
-			dx->pa = frag_pa;
-			dx->is_mapped = 1U;
+			dx = aq_ring_next_dx(ring, dx);
+			dx_buff = &ring->buff_ring[dx];
+
+			dx_buff->flags = 0U;
+			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+			dx_buff->pa = frag_pa;
+			dx_buff->is_mapped = 1U;
 
 			frag_len -= AQ_CFG_TX_FRAME_MAX;
 			frag_pa += AQ_CFG_TX_FRAME_MAX;
+			++ret;
 		}
 
-		++dx;
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
+
+		dx_buff->flags = 0U;
+		dx_buff->len = frag_len;
+		dx_buff->pa = frag_pa;
+		dx_buff->is_mapped = 1U;
 		++ret;
-		dx->flags = 0U;
-		dx->len = frag_len;
-		dx->pa = frag_pa;
-		dx->is_mapped = 1U;
 	}
 
-	dx->is_eop = 1U;
-	dx->skb = skb;
+	dx_buff->is_eop = 1U;
+	dx_buff->skb = skb;
+	goto exit;
 
-	return ret;
-}
-
-static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
-				       struct sk_buff *skb,
-				       struct aq_ring_buff_s *dx)
-{
-	dx->flags = 0U;
-	dx->len_pkt = skb->len;
-	dx->len_l2 = ETH_HLEN;
-	dx->len_l3 = ip_hdrlen(skb);
-	dx->len_l4 = tcp_hdrlen(skb);
-	dx->mss = skb_shinfo(skb)->gso_size;
-	dx->is_txc = 1U;
-	return 1U;
-}
-
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
-				   struct aq_ring_buff_s *dx)
-{
-	unsigned int ret = 0U;
-
-	if (unlikely(skb_is_gso(skb))) {
-		ret = aq_nic_map_skb_lso(self, skb, dx);
-		++dx;
+mapping_error:
+	for (dx = ring->sw_tail;
+	     ret > 0;
+	     --ret, dx = aq_ring_next_dx(ring, dx)) {
+		dx_buff = &ring->buff_ring[dx];
+		if (!dx_buff->is_txc && dx_buff->pa) {
+			if (unlikely(dx_buff->is_sop)) {
+				dma_unmap_single(aq_nic_get_dev(self),
+						 dx_buff->pa,
+						 dx_buff->len,
+						 DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(aq_nic_get_dev(self),
+					       dx_buff->pa,
+					       dx_buff->len,
+					       DMA_TO_DEVICE);
+			}
+		}
 	}
 
-	ret += aq_nic_map_skb_frag(self, skb, dx);
-
+exit:
 	return ret;
 }
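The new mapping_error path is the standard all-or-nothing DMA idiom: check
every dma_map_single()/skb_frag_dma_map() result with dma_mapping_error(), and
on failure walk back over the already-mapped descriptors with the matching
unmap call (dma_unmap_single() for the linear part, dma_unmap_page() for page
fragments). Condensed into a hypothetical helper, the shape is:

#include <linux/dma-mapping.h>

/* Hypothetical helper condensing the check-and-unwind pattern above. */
static int map_all_or_none(struct device *dev, void **buf, size_t *len,
			   dma_addr_t *pa, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		pa[i] = dma_map_single(dev, buf[i], len[i], DMA_TO_DEVICE);
		if (dma_mapping_error(dev, pa[i]))
			goto unwind;	/* descriptor i was never mapped */
	}
	return 0;

unwind:
	while (i--)	/* unmap only what was mapped successfully */
		dma_unmap_single(dev, pa[i], len[i], DMA_TO_DEVICE);
	return -ENOMEM;
}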
@@ -572,18 +590,13 @@ __acquires(&ring->lock)
 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
 	unsigned int tc = 0U;
 	unsigned int trys = AQ_CFG_LOCK_TRYS;
-	int err = 0;
+	int err = NETDEV_TX_OK;
 	bool is_nic_in_bad_state;
-	bool is_busy = false;
-	struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];
 
 	frags = skb_shinfo(skb)->nr_frags + 1;
 
 	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
 
-	atomic_inc(&self->header.busy_count);
-	is_busy = true;
-
 	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
 		dev_kfree_skb_any(skb);
 		goto err_exit;
@@ -602,23 +615,27 @@ __acquires(&ring->lock)
 	do {
 		if (spin_trylock(&ring->header.lock)) {
-			frags = aq_nic_map_skb(self, skb, &buffers[0]);
-
-			aq_ring_tx_append_buffs(ring, &buffers[0], frags);
-
-			err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
-							      ring, frags);
-			if (err >= 0) {
-				if (aq_ring_avail_dx(ring) <
-				    AQ_CFG_SKB_FRAGS_MAX + 1)
-					aq_nic_ndev_queue_stop(self, ring->idx);
-			}
-			spin_unlock(&ring->header.lock);
+			frags = aq_nic_map_skb(self, skb, ring);
 
-			if (err >= 0) {
-				++ring->stats.tx.packets;
-				ring->stats.tx.bytes += skb->len;
+			if (likely(frags)) {
+				err = self->aq_hw_ops.hw_ring_tx_xmit(
+								self->aq_hw,
+								ring, frags);
+				if (err >= 0) {
+					if (aq_ring_avail_dx(ring) <
+					    AQ_CFG_SKB_FRAGS_MAX + 1)
+						aq_nic_ndev_queue_stop(
+								self,
+								ring->idx);
+
+					++ring->stats.tx.packets;
+					ring->stats.tx.bytes += skb->len;
+				}
+			} else {
+				err = NETDEV_TX_BUSY;
 			}
+
+			spin_unlock(&ring->header.lock);
 			break;
 		}
 	} while (--trys);
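Returning NETDEV_TX_BUSY when aq_nic_map_skb() maps nothing relies on the
ndo_start_xmit() contract: on NETDEV_TX_OK the driver owns the skb, while on
NETDEV_TX_BUSY the skb is untouched and the core requeues it for a later
retry. Schematically, with a hypothetical helper:

static netdev_tx_t hypothetical_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	if (!hypothetical_map_and_queue(skb))	/* hypothetical helper */
		return NETDEV_TX_BUSY;	/* skb must NOT be freed here */

	return NETDEV_TX_OK;		/* skb consumed (sent or dropped) */
}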
@@ -629,8 +646,6 @@ __acquires(&ring->lock)
 	}
 
 err_exit:
-	if (is_busy)
-		atomic_dec(&self->header.busy_count);
 	return err;
 }
@@ -942,7 +957,7 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
 
 	if (!netif_running(self->ndev)) {
 		err = 0;
-		goto err_exit;
+		goto out;
 	}
 	rtnl_lock();
 	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
@@ -967,8 +982,9 @@ int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
 		netif_device_attach(self->ndev);
 		netif_tx_start_all_queues(self->ndev);
 	}
-	rtnl_unlock();
 
 err_exit:
+	rtnl_unlock();
+out:
 	return err;
 }
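The label split keeps rtnl_lock()/rtnl_unlock() balanced on every path:
err_exit is only reachable after the lock has been taken, while out bypasses
it entirely. The idiom, reduced to its shape with hypothetical steps:

int hypothetical_pm_change(struct net_device *ndev)
{
	int err = 0;

	if (!netif_running(ndev))
		goto out;			/* lock never taken */

	rtnl_lock();
	err = hypothetical_transition(ndev);	/* hypothetical step */
	if (err < 0)
		goto err_exit;
	/* ... more work under the lock ... */

err_exit:
	rtnl_unlock();			/* reached only with the lock held */
out:
	return err;
}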
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -22,7 +22,6 @@ struct aq_nic_s {
 	unsigned int aq_vecs;
 	unsigned int packet_filter;
 	unsigned int power_state;
-	bool is_ndev_registered;
 	u8 port;
 	struct aq_hw_ops aq_hw_ops;
 	struct aq_hw_caps_s aq_hw_caps;
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,25 +104,6 @@ int aq_ring_init(struct aq_ring_s *self)
 	return 0;
 }
 
-void aq_ring_tx_append_buffs(struct aq_ring_s *self,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers)
-{
-	if (likely(self->sw_tail + buffers < self->size)) {
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * buffers);
-	} else {
-		unsigned int first_part = self->size - self->sw_tail;
-		unsigned int second_part = buffers - first_part;
-
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * first_part);
-
-		memcpy(&self->buff_ring[0], &buffer[first_part],
-		       sizeof(buffer[0]) * second_part);
-	}
-}
-
 void aq_ring_tx_clean(struct aq_ring_s *self)
 {
 	struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -209,7 +190,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
 				goto err_exit;
 			}
 
-			skb->dev = ndev;
 			skb_put(skb, buff->len);
 		} else {
 			skb = netdev_alloc_skb(ndev, ETH_HLEN);
@@ -271,6 +251,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
 
 int aq_ring_rx_fill(struct aq_ring_s *self)
 {
+	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
 	struct aq_ring_buff_s *buff = NULL;
 	int err = 0;
 	int i = 0;
@@ -283,7 +265,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
 		buff->len = AQ_CFG_RX_FRAME_MAX;
 
 		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
-					 __GFP_COMP, 0);
+					 __GFP_COMP, pages_order);
 		if (!buff->page) {
 			err = -ENOMEM;
 			goto err_exit;
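The new pages_order works out the allocation order for one receive frame: a
ceiling division gives the page count, then fls(n) - 1 (the 1-based index of
the highest set bit, i.e. floor(log2(n))) turns it into an order, which is
exact when the page count is a power of two. A small userspace model of the
arithmetic:

#include <stdio.h>

/* Model of fls(pages) - 1 as used above. */
static int order_for(unsigned int frame_max, unsigned int page_size)
{
	unsigned int pages = frame_max / page_size +
			     (frame_max % page_size ? 1 : 0); /* ceil div */
	int order = 0;

	while ((1u << (order + 1)) <= pages)	/* floor(log2(pages)) */
		order++;
	return order;
}

int main(void)
{
	/* assuming the default 2 KiB frame and 4 KiB pages, prints 0 */
	printf("%d\n", order_for(2 * 1024, 4 * 1024));
	return 0;
}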
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -146,9 +146,6 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 int aq_ring_init(struct aq_ring_s *self);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
-void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers);
 void aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
 int aq_ring_rx_fill(struct aq_ring_s *self);
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -19,7 +19,6 @@
 struct aq_obj_s {
 	spinlock_t lock; /* spinlock for nic/rings processing */
 	atomic_t flags;
-	atomic_t busy_count;
 };
 
 static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -659,8 +659,8 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 		}
 
 		if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-			buff->len = (rxd_wb->pkt_len &
-				     (AQ_CFG_RX_FRAME_MAX - 1U));
+			buff->len = rxd_wb->pkt_len %
+				    AQ_CFG_RX_FRAME_MAX;
 			buff->len = buff->len ?
 				    buff->len : AQ_CFG_RX_FRAME_MAX;
 			buff->next = 0U;
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -673,8 +673,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 		}
 
 		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-			buff->len = (rxd_wb->pkt_len &
-				     (AQ_CFG_RX_FRAME_MAX - 1U));
+			buff->len = rxd_wb->pkt_len %
+				    AQ_CFG_RX_FRAME_MAX;
 			buff->len = buff->len ?
 				    buff->len : AQ_CFG_RX_FRAME_MAX;
 			buff->next = 0U;
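Both the A0 and B0 fix replace a bitmask with a true modulo: x & (N - 1)
equals x % N only when N is a power of two, so the % form stays correct even
if AQ_CFG_RX_FRAME_MAX is ever configured to a non-power-of-two value. A tiny
demonstration:

#include <assert.h>

int main(void)
{
	unsigned int pkt_len = 5000;

	/* power-of-two N: mask and modulo agree (both 904) */
	assert((pkt_len % 4096) == (pkt_len & (4096 - 1)));
	/* N = 3000: modulo gives 2000, the mask gives 896 */
	assert((pkt_len % 3000) != (pkt_len & (3000 - 1)));
	return 0;
}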