Commit 13b5b7fd authored by Sasha Neftin, committed by Jeff Kirsher

igc: Add support for Tx/Rx rings

This change adds the defines and structures necessary to support both Tx
and Rx descriptor rings.
Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 3df25e4c
@@ -7,4 +7,4 @@
obj-$(CONFIG_IGC) += igc.o
-igc-objs := igc_main.o igc_mac.o
+igc-objs := igc_main.o igc_mac.o igc_base.o

@@ -46,6 +46,45 @@ extern char igc_driver_version[];
#define MAX_Q_VECTORS 8
#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* RX and TX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGC_RX_PTHRESH 8
#define IGC_RX_HTHRESH 8
#define IGC_TX_PTHRESH 8
#define IGC_TX_HTHRESH 1
#define IGC_RX_WTHRESH 4
#define IGC_TX_WTHRESH 16
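For orientation, a sketch (not part of this patch) of how such thresholds are typically packed into the per-queue RXDCTL/TXDCTL registers; the field positions (PTHRESH in bits 4:0, HTHRESH in bits 12:8, WTHRESH in bits 20:16) and the register helpers are assumed from the earlier igb parts, and the real ring-setup code arrives in a later patch:

	u32 rxdctl = IGC_RX_PTHRESH | (IGC_RX_HTHRESH << 8) |
		     (IGC_RX_WTHRESH << 16) | IGC_RXDCTL_QUEUE_ENABLE;
	u32 txdctl = IGC_TX_PTHRESH | (IGC_TX_HTHRESH << 8) |
		     (IGC_TX_WTHRESH << 16) | IGC_TXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(ring->reg_idx), rxdctl);	/* hypothetical ring */
	wr32(IGC_TXDCTL(ring->reg_idx), txdctl);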
#define IGC_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define IGC_TS_HDR_LEN 16

#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)

#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
enum igc_state_t {
	__IGC_TESTING,
	__IGC_RESETTING,
@@ -53,6 +92,33 @@ enum igc_state_t {
	__IGC_PTP_TX_IN_PROGRESS,
};
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igc_tx_buffer {
	union igc_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igc_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};
struct igc_tx_queue_stats {
	u64 packets;
	u64 bytes;
@@ -214,4 +280,63 @@ struct igc_adapter {
	struct igc_mac_addr *mac_table;
};
/* igc_desc_unused - calculate the number of unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
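A worked example of the wrap-around arithmetic, with illustrative values:

	/* count = 256, next_to_clean = 10, next_to_use = 250:
	 * ntc <= ntu, so unused = 256 + 10 - 250 - 1 = 15.
	 * One slot is always left unused so that a completely full
	 * ring can be told apart from a completely empty one.
	 */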
static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

enum igc_ring_flags_t {
	IGC_RING_FLAG_RX_3K_BUFFER,
	IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGC_RING_FLAG_RX_SCTP_CSUM,
	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGC_RING_FLAG_TX_CTX_IDX,
	IGC_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGC_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
	return IGC_RXBUFFER_2048;
}

static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))

#define IGC_RX_DESC(R, i) \
	(&(((union igc_adv_rx_desc *)((R)->desc))[i]))

#define IGC_TX_DESC(R, i) \
	(&(((union igc_adv_tx_desc *)((R)->desc))[i]))

#define IGC_TX_CTXTDESC(R, i) \
	(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
#endif /* _IGC_H_ */
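To make the accessor macros above concrete, a hypothetical fragment of an Rx cleanup loop (IGC_RXD_STAT_DD is assumed from the igb-family descriptor-done bit and is not defined in this patch; the ring fields come from the structures in this header):

	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_clean;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	if (rx_desc->wb.upper.status_error & cpu_to_le32(IGC_RXD_STAT_DD)) {
		/* hardware has written this descriptor back; the frame
		 * length is available in rx_desc->wb.upper.length
		 */
	}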
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/delay.h>
#include "igc_hw.h"
#include "igc_i225.h"
/**
 * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO. This
 * function clears the FIFOs and flushes any packets that came in while Rx
 * was being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}

	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		pr_debug("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Then set RCTL.EN and wait 2ms so
	 * that any packets arriving while RCTL.EN is being set are flushed.
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);
	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;
	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}
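A sketch of the intended call site, assuming an igc_configure()-style routine modeled on igb (the real caller arrives with the ring-setup code in a later patch):

	/* after the Rx unit is enabled, flush any stale FIFO contents */
	wr32(IGC_RCTL, rd32(IGC_RCTL) | IGC_RCTL_EN);
	igc_rx_fifo_flush_base(hw);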
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_BASE_H
#define _IGC_BASE_H
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
	struct {
		__le64 buffer_addr; /* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd; /* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};
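A hypothetical sketch of posting one advanced Tx descriptor in read format; the cmd_type flag values and the PAYLEN shift of 14 are assumptions carried over from the igb-family descriptor layout and are not defined in this patch:

	union igc_adv_tx_desc *tx_desc = IGC_TX_DESC(tx_ring, i);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);	/* mapped data buffer */
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type | len);
	tx_desc->read.olinfo_status = cpu_to_le32(paylen << 14); /* PAYLEN */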
struct igc_adv_data_desc {
	__le64 buffer_addr; /* Address of the descriptor's data buffer */
	union {
		u32 data;
		struct {
			u32 datalen:16; /* Data buffer length */
			u32 rsvd:4;
			u32 dtyp:4; /* Descriptor type */
			u32 dcmd:8; /* Descriptor command */
		} config;
	} lower;
	union {
		u32 data;
		struct {
			u32 status:4; /* Descriptor status */
			u32 idx:4;
			u32 popts:6; /* Packet Options */
			u32 paylen:18; /* Payload length */
		} options;
	} upper;
};

/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
	} read;
	struct {
		struct {
			union {
				__le32 data;
				struct {
					__le16 pkt_info; /* RSS type, Pkt type */
					/* Split Header, header buffer len */
					__le16 hdr_info;
				} hs_rss;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				struct {
					__le16 ip_id; /* IP id */
					__le16 csum; /* Packet Checksum */
				} csum_ip;
			} hi_dword;
		} lower;
		struct {
			__le32 status_error; /* ext status/error */
			__le16 length; /* Packet length */
			__le16 vlan; /* VLAN tag */
		} upper;
	} wb; /* writeback */
};

/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */

/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */

/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000

#endif /* _IGC_BASE_H */
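As a usage sketch for the SRRCTL fields above, a one-buffer advanced-descriptor queue would typically be programmed like this (mirroring igb; the IGC_SRRCTL(i) register macro is an assumption here and is not added by this patch):

	u32 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;

	srrctl |= igc_rx_bufsz(ring) >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
	wr32(IGC_SRRCTL(ring->reg_idx), srrctl);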
@@ -51,6 +51,10 @@
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define IGC_ICR_INT_ASSERTED BIT(31)
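A minimal sketch of how a legacy (shared-line) interrupt handler would use this bit to claim or decline an interrupt; the handler name is illustrative, the real ISR comes with a later patch:

	static irqreturn_t igc_intr(int irq, void *data)
	{
		struct igc_adapter *adapter = data;
		struct igc_hw *hw = &adapter->hw;
		u32 icr = rd32(IGC_ICR);	/* reading ICR clears it */

		if (!(icr & IGC_ICR_INT_ASSERTED))
			return IRQ_NONE;	/* shared line, not ours */

		/* ... schedule NAPI, handle link-state bits, etc. ... */
		return IRQ_HANDLED;
	}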
#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */

#define IMS_ENABLE_MASK ( \
@@ -80,6 +84,45 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
#define IGC_CT_SHIFT 4
#define IGC_COLLISION_THRESHOLD 15
/* Management Control */
#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
/* Receive Control */
#define IGC_RCTL_RST 0x00000001 /* Software reset */
#define IGC_RCTL_EN 0x00000002 /* enable */
#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
#define IGC_N0_QUEUE -1

#endif /* _IGC_DEFINES_H_ */
@@ -10,6 +10,7 @@
#include "igc_defines.h"
#include "igc_mac.h"
#include "igc_i225.h"
#include "igc_base.h"

#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
...
This diff is collapsed.
@@ -168,6 +168,9 @@
#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
...