Commit bb837a37 authored by Gerhard Engleder, committed by David S. Miller

tsnep: Use page pool for RX

Use page pool for RX buffer handling. This makes the RX path more
efficient and is required prework for future XDP support.
Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 308ce142
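
For context before the diff: the page pool API replaces per-frame skb allocation plus dma_map_single() with a per-ring pool of recycled, pre-mapped pages. A minimal sketch of the receive fast path built on that API (illustrative code, not the driver's; it assumes a pool created with PP_FLAG_DMA_MAP and trims error handling):

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/page_pool.h>

    /* One completed RX buffer, sketched; 'len' would come from the
     * descriptor write-back and 'offset' is the reserved headroom.
     */
    static void rx_frame_sketch(struct napi_struct *napi, struct page_pool *pool,
                                struct device *dev, struct page *page,
                                unsigned int offset, unsigned int len)
    {
            struct sk_buff *skb;

            /* pool pages stay mapped; only the CPU-direction sync is needed */
            dma_sync_single_range_for_cpu(dev, page_pool_get_dma_addr(page),
                                          offset, len,
                                          page_pool_get_dma_dir(pool));

            /* wrap the page in an skb without copying the payload */
            skb = napi_build_skb(page_address(page), PAGE_SIZE);
            if (skb) {
                    skb_reserve(skb, offset);
                    skb_put(skb, len);
                    page_pool_release_page(pool, page); /* skb owns the page now */
                    napi_gro_receive(napi, skb);
            } else {
                    page_pool_recycle_direct(pool, page); /* reuse immediately */
            }
    }
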
--- a/drivers/net/ethernet/engleder/Kconfig
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -21,6 +21,7 @@ config TSNEP
 	depends on HAS_IOMEM && HAS_DMA
 	depends on PTP_1588_CLOCK_OPTIONAL
 	select PHYLIB
+	select PAGE_POOL
 	help
 	  Support for the Engleder TSN endpoint Ethernet MAC IP Core.
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -96,9 +96,9 @@ struct tsnep_rx_entry {
 	u32 properties;
 
-	struct sk_buff *skb;
+	struct page *page;
 	size_t len;
-	DEFINE_DMA_UNMAP_ADDR(dma);
+	dma_addr_t dma;
 };
 
 struct tsnep_rx {
@@ -113,6 +113,7 @@ struct tsnep_rx {
 	int read;
 	u32 owner_counter;
 	int increment_owner_counter;
+	struct page_pool *page_pool;
 
 	u32 packets;
 	u32 bytes;
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,10 +27,10 @@
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 
-#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
-			TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
-#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
-#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
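
For orientation, here is how the new buffer size works out under common defaults. This is a worked example, not part of the commit: NET_SKB_PAD, NET_IP_ALIGN and sizeof(struct skb_shared_info) are all arch- and config-dependent, so the concrete numbers below are assumptions (typical 64-bit defaults):

    /* Assuming 4 KiB pages, NET_SKB_PAD = 64, NET_IP_ALIGN = 2 and
     * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320:
     *
     *   TSNEP_SKB_PAD         = 64 + 2          =   66
     *   TSNEP_HEADROOM        = ALIGN(66, 4)    =   68
     *   TSNEP_MAX_RX_BUF_SIZE = 4096 - 68 - 320 = 3708
     *
     * Each page thus holds the headroom, the received frame (inline
     * metadata + packet + FCS) and the skb_shared_info tail that
     * napi_build_skb() requires, and the resulting buffer size is a
     * multiple of 4, which tsnep_rx_activate() below relies on.
     */
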
@@ -587,14 +587,15 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 
 	for (i = 0; i < TSNEP_RING_SIZE; i++) {
 		entry = &rx->entry[i];
-		if (dma_unmap_addr(entry, dma))
-			dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
-					 dma_unmap_len(entry, len),
-					 DMA_FROM_DEVICE);
-		if (entry->skb)
-			dev_kfree_skb(entry->skb);
+		if (entry->page)
+			page_pool_put_full_page(rx->page_pool, entry->page,
+						false);
+		entry->page = NULL;
 	}
 
+	if (rx->page_pool)
+		page_pool_destroy(rx->page_pool);
+
 	memset(rx->entry, 0, sizeof(rx->entry));
 
 	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
@@ -607,31 +608,19 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 	}
 }
 
-static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
-				      struct tsnep_rx_entry *entry)
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
+				 struct tsnep_rx_entry *entry)
 {
-	struct device *dmadev = rx->adapter->dmadev;
-	struct sk_buff *skb;
-	dma_addr_t dma;
+	struct page *page;
 
-	skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
-				 GFP_ATOMIC | GFP_DMA);
-	if (!skb)
+	page = page_pool_dev_alloc_pages(rx->page_pool);
+	if (unlikely(!page))
 		return -ENOMEM;
-	skb_reserve(skb, RX_SKB_RESERVE);
 
-	dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
-			     DMA_FROM_DEVICE);
-	if (dma_mapping_error(dmadev, dma)) {
-		dev_kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	entry->skb = skb;
-	entry->len = RX_SKB_LENGTH;
-	dma_unmap_addr_set(entry, dma, dma);
-	entry->desc->rx = __cpu_to_le64(dma);
+	entry->page = page;
+	entry->len = TSNEP_MAX_RX_BUF_SIZE;
+	entry->dma = page_pool_get_dma_addr(entry->page);
+	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
 
 	return 0;
 }
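
Worth noting in this hunk: the old dma_map_single()/dma_mapping_error() error path has no replacement because PP_FLAG_DMA_MAP moves mapping into the pool. A page is mapped once when it first enters the pool and unmapped only when it finally leaves it, so a refill shrinks to the following (sketch; 'pool' stands for rx->page_pool):

            struct page *page = page_pool_dev_alloc_pages(pool);
            dma_addr_t dma;

            if (unlikely(!page))
                    return -ENOMEM;
            dma = page_pool_get_dma_addr(page); /* cached by the pool */

The GFP_ATOMIC | GFP_DMA hint disappears as well; the pool allocates ordinary pages near the NUMA node of the DMA device, as configured below.
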
@@ -640,6 +629,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 {
 	struct device *dmadev = rx->adapter->dmadev;
 	struct tsnep_rx_entry *entry;
+	struct page_pool_params pp_params = { 0 };
 	struct tsnep_rx_entry *next_entry;
 	int i, j;
 	int retval;
@@ -661,12 +651,28 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
 		}
 	}
 
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp_params.order = 0;
+	pp_params.pool_size = TSNEP_RING_SIZE;
+	pp_params.nid = dev_to_node(dmadev);
+	pp_params.dev = dmadev;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+	pp_params.offset = TSNEP_SKB_PAD;
+	rx->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rx->page_pool)) {
+		retval = PTR_ERR(rx->page_pool);
+		rx->page_pool = NULL;
+		goto failed;
+	}
+
 	for (i = 0; i < TSNEP_RING_SIZE; i++) {
 		entry = &rx->entry[i];
 		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
 		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
 
-		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+		retval = tsnep_rx_alloc_buffer(rx, entry);
 		if (retval)
 			goto failed;
 	}
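
The two flags do the heavy lifting here: PP_FLAG_DMA_MAP lets the pool own the DMA mapping of every page, and PP_FLAG_DMA_SYNC_DEV makes the pool sync at most max_len bytes starting at offset back to the device whenever a page is recycled, so the driver never has to sync for the device itself. A self-contained sketch of the same setup (the MY_* names are placeholders standing in for the tsnep values above):

    #include <linux/skbuff.h>
    #include <net/page_pool.h>

    #define MY_SKB_PAD         (NET_SKB_PAD + NET_IP_ALIGN)
    #define MY_HEADROOM        ALIGN(MY_SKB_PAD, 4)
    #define MY_MAX_RX_BUF_SIZE (PAGE_SIZE - MY_HEADROOM - \
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

    static struct page_pool *my_rx_pool_create(struct device *dev,
                                               unsigned int ring_size)
    {
            struct page_pool_params pp = {
                    .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                    .order     = 0,                  /* one page per buffer */
                    .pool_size = ring_size,
                    .nid       = dev_to_node(dev),
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
                    .max_len   = MY_MAX_RX_BUF_SIZE, /* area synced for device */
                    .offset    = MY_SKB_PAD,         /* headroom before data */
            };

            /* page_pool_create() returns ERR_PTR() on failure, never NULL */
            return page_pool_create(&pp);
    }
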
@@ -682,7 +688,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
 {
 	struct tsnep_rx_entry *entry = &rx->entry[index];
 
-	/* RX_SKB_LENGTH is a multiple of 4 */
+	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
 	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
 	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
 	if (index == rx->increment_owner_counter) {
@@ -705,19 +711,52 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
 	entry->desc->properties = __cpu_to_le32(entry->properties);
 }
 
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+				       int length)
+{
+	struct sk_buff *skb;
+
+	skb = napi_build_skb(page_address(page), PAGE_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+
+	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+		struct tsnep_rx_inline *rx_inline =
+			(struct tsnep_rx_inline *)(page_address(page) +
+						   TSNEP_SKB_PAD);
+
+		skb_shinfo(skb)->tx_flags |=
+			SKBTX_HW_TSTAMP_NETDEV;
+		memset(hwtstamps, 0, sizeof(*hwtstamps));
+		hwtstamps->netdev_data = rx_inline;
+	}
+
+	skb_record_rx_queue(skb, rx->queue_index);
+	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+	return skb;
+}
+
 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 			 int budget)
 {
 	struct device *dmadev = rx->adapter->dmadev;
 	int done = 0;
+	enum dma_data_direction dma_dir;
 	struct tsnep_rx_entry *entry;
+	struct page *page;
 	struct sk_buff *skb;
-	size_t len;
-	dma_addr_t dma;
 	int length;
 	bool enable = false;
 	int retval;
 
+	dma_dir = page_pool_get_dma_dir(rx->page_pool);
+
 	while (likely(done < budget)) {
 		entry = &rx->entry[rx->read];
 		if ((__le32_to_cpu(entry->desc_wb->properties) &
@@ -730,43 +769,34 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 		 */
 		dma_rmb();
 
-		skb = entry->skb;
-		len = dma_unmap_len(entry, len);
-		dma = dma_unmap_addr(entry, dma);
+		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+		length = __le32_to_cpu(entry->desc_wb->properties) &
+			 TSNEP_DESC_LENGTH_MASK;
+		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
+					      length, dma_dir);
+		page = entry->page;
 
 		/* forward skb only if allocation is successful, otherwise
-		 * skb is reused and frame dropped
+		 * page is reused and frame dropped
 		 */
-		retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+		retval = tsnep_rx_alloc_buffer(rx, entry);
 		if (!retval) {
-			dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
-
-			length = __le32_to_cpu(entry->desc_wb->properties) &
-				 TSNEP_DESC_LENGTH_MASK;
-			skb_put(skb, length - ETH_FCS_LEN);
-			if (rx->adapter->hwtstamp_config.rx_filter ==
-			    HWTSTAMP_FILTER_ALL) {
-				struct skb_shared_hwtstamps *hwtstamps =
-					skb_hwtstamps(skb);
-				struct tsnep_rx_inline *rx_inline =
-					(struct tsnep_rx_inline *)skb->data;
-
-				skb_shinfo(skb)->tx_flags |=
-					SKBTX_HW_TSTAMP_NETDEV;
-				memset(hwtstamps, 0, sizeof(*hwtstamps));
-				hwtstamps->netdev_data = rx_inline;
-			}
-			skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
-			skb_record_rx_queue(skb, rx->queue_index);
-			skb->protocol = eth_type_trans(skb,
-						       rx->adapter->netdev);
+			skb = tsnep_build_skb(rx, page, length);
+			if (skb) {
+				page_pool_release_page(rx->page_pool, page);
 
-			rx->packets++;
-			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
-			if (skb->pkt_type == PACKET_MULTICAST)
-				rx->multicast++;
+				rx->packets++;
+				rx->bytes += length -
+					     TSNEP_RX_INLINE_METADATA_SIZE;
+				if (skb->pkt_type == PACKET_MULTICAST)
+					rx->multicast++;
 
-			napi_gro_receive(napi, skb);
+				napi_gro_receive(napi, skb);
+			} else {
+				page_pool_recycle_direct(rx->page_pool, page);
+
+				rx->dropped++;
+			}
 
 			done++;
 		} else {
 			rx->dropped++;
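
Two different ways of giving a page back appear above, and the distinction matters: page_pool_release_page() unmaps the page and severs it from the pool, so the skb's ordinary free path later frees it like any other page, while page_pool_recycle_direct() puts the still-mapped page straight back into the pool's lock-free cache for reuse from the same NAPI context. The refill-before-forward order is also carried over from the skb code: a replacement buffer is allocated first, and only on success is the received page forwarded, so a failed allocation reuses the old page in place and drops the frame instead of leaving an empty ring slot. The ownership outcomes, condensed (a sketch using the driver's names from the hunks above):

            /* per completed descriptor */
            struct sk_buff *skb;

            if (tsnep_rx_alloc_buffer(rx, entry)) {
                    rx->dropped++;          /* keep old page in the ring slot */
            } else if ((skb = tsnep_build_skb(rx, page, length))) {
                    page_pool_release_page(rx->page_pool, page); /* skb owns it */
                    napi_gro_receive(napi, skb);
            } else {
                    page_pool_recycle_direct(rx->page_pool, page); /* fast reuse */
                    rx->dropped++;
            }
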