Commit ed4d81c4 authored by Igor Russkikh, committed by David S. Miller

net: aquantia: when cleaning hw cache it should be toggled

From the HW specification, to correctly reset the HW caches (a required
workaround when stopping the device) the register bit should actually
be toggled.

It was previously always just set. Due to the way the driver stops the HW
this never actually caused any issues, but it still may, so clean this up.

Fixes: 7a1bb494 ("net: aquantia: fix potential IOMMU fault after driver unbind")
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 06b0d7fe
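
Below is a minimal, self-contained sketch of the toggle-and-poll pattern this change introduces: read the current value of the cache-init bit, write back its inverse, then poll a separate "done" bit until the hardware acknowledges the flush. The register stubs, masks, and helper names here are illustrative assumptions for the sketch only; they are not the driver's real MMIO accessors, nor the kernel's readx_poll_timeout_atomic().

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two hardware bitfields touched by the patch
 * (assumed/simulated registers, not real device state).
 */
static uint32_t init_reg;       /* ~ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT */
static uint32_t init_done_reg;  /* ~ RDM_RX_DMA_DESC_CACHE_INIT_DONE */

static uint32_t reg_read_bit(const uint32_t *reg, uint32_t msk, uint32_t shift)
{
	return (*reg & msk) >> shift;
}

static void reg_write_bit(uint32_t *reg, uint32_t msk, uint32_t shift, uint32_t val)
{
	*reg = (*reg & ~msk) | ((val << shift) & msk);
	/* Simulated hardware: a write to the init bit completes immediately. */
	init_done_reg = 1;
}

/* Toggle the cache-init bit instead of unconditionally setting it. */
static void desc_cache_init_tgl(void)
{
	uint32_t val = reg_read_bit(&init_reg, 0x1U, 0);

	reg_write_bit(&init_reg, 0x1U, 0, val ^ 1U);
}

int main(void)
{
	desc_cache_init_tgl();

	/* Simplified stand-in for polling the "done" bit with a timeout. */
	while (reg_read_bit(&init_done_reg, 0x1U, 0) != 1U)
		;

	printf("init bit is now %u, descriptor cache init done\n",
	       init_reg & 0x1U);
	return 0;
}
```

In the actual driver the same pattern is expressed through the aq_hw_read_reg_bit()/aq_hw_write_reg_bit() helpers and readx_poll_timeout_atomic(), as the hunks below show.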
@@ -968,14 +968,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
+	int err;
+	u32 val;
+
 	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
 
 	/* Invalidate Descriptor Cache to prevent writing to the cached
 	 * descriptors and to the data pointer of those descriptors
 	 */
-	hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+	hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
 
-	return aq_hw_err_from_flags(self);
+	err = aq_hw_err_from_flags(self);
+
+	if (err)
+		goto err_exit;
+
+	readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+				  self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+	return err;
 }
 
 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
 			    HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
 {
+	u32 val;
+
+	val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+				 HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+				 HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
 	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
 			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
 			    HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
-			    init);
+			    val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+	return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+				  RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+				  RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
 }
 
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
 					    u32 rx_pkt_buff_size_per_tc,
 					    u32 buffer);
 
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
 
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
@@ -318,6 +318,25 @@
 /* default value of bitfield rdm_desc_init_i */
 #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
 
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"