Commit 6df5713e authored by David S. Miller

Merge branch 'gve-jumbo-frame'

Jeroen de Borst says:

====================
gve: Add jumbo-frame support for GQ

This patchset introduces jumbo-frame support for the GQ queue format.
The device already supports jumbo-frames on TX. This introduces
multi-descriptor RX packets using a packet continuation bit.

A widely deployed driver has a bug which causes it to fail to load
when an MTU greater than 2048 bytes is configured. A jumbo-frame device
option is introduced to pass a jumbo-frame MTU only to drivers that
support it (see the sketch below this message).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 233cdfba 255489f5
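
To make the MTU negotiation described in the message concrete, here is a minimal user-space sketch of how the driver ends up choosing its advertised maximum MTU: keep the page-size-capped value from the device descriptor unless the jumbo-frames device option is present and its feature bit is set. It restates the gve_enable_supported_features() logic from the diff below; the types, the 2048-byte default, and the helper name pick_max_mtu are simplified stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the feature bit matches GVE_SUP_JUMBO_FRAMES_MASK
 * in the diff below, the 2048-byte default only stands in for the
 * page-size-capped MTU from the device descriptor.
 */
#define SUP_JUMBO_FRAMES_MASK	(1u << 2)
#define DEFAULT_CAPPED_MTU	2048

/* Hypothetical host-endian view of the jumbo-frames option payload. */
struct jumbo_frames_option {
	uint32_t supported_features_mask;
	uint16_t max_mtu;
};

/* Keep the capped default unless the device advertised the jumbo-frames
 * option *and* the supported-features bit, so older drivers that never
 * parse the option keep working with the small MTU.
 */
static uint16_t pick_max_mtu(const struct jumbo_frames_option *opt,
			     uint32_t supported_features_mask)
{
	if (opt && (supported_features_mask & SUP_JUMBO_FRAMES_MASK))
		return opt->max_mtu;
	return DEFAULT_CAPPED_MTU;
}

int main(void)
{
	struct jumbo_frames_option opt = {
		.supported_features_mask = SUP_JUMBO_FRAMES_MASK,
		.max_mtu = 9000,
	};

	printf("without option: %u\n", (unsigned)pick_max_mtu(NULL, 0));
	printf("with option:    %u\n",
	       (unsigned)pick_max_mtu(&opt, opt.supported_features_mask));
	return 0;
}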
@@ -142,6 +142,19 @@ struct gve_index_list {
 	s16 tail;
 };
 
+/* A single received packet split across multiple buffers may be
+ * reconstructed using the information in this structure.
+ */
+struct gve_rx_ctx {
+	/* head and tail of skb chain for the current packet or NULL if none */
+	struct sk_buff *skb_head;
+	struct sk_buff *skb_tail;
+	u16 total_expected_size;
+	u8 expected_frag_cnt;
+	u8 curr_frag_cnt;
+	u8 reuse_frags;
+};
+
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
@@ -153,6 +166,7 @@ struct gve_rx_ring {
 			/* threshold for posting new buffs and descs */
 			u32 db_threshold;
+			u16 packet_buffer_size;
 		};
 
 		/* DQO fields. */
@@ -200,15 +214,16 @@ struct gve_rx_ring {
 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
+	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
+	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
	u32 q_num; /* queue index */
 	u32 ntfy_id; /* notification block index */
 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
-	/* head and tail of skb chain for the current packet or NULL if none */
-	struct sk_buff *skb_head;
-	struct sk_buff *skb_tail;
+	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
 };
 
 /* A TX desc ring entry */
...
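The new struct gve_rx_ctx above carries the state of a partially received packet between ring entries; the fragment counters and expected size are presumably filled in from the descriptors by the (collapsed) GQI RX changes. A minimal, self-contained sketch of that bookkeeping, using plain counters instead of the sk_buff chain and illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for gve_rx_ctx: no skb chain, just the counters. */
struct rx_ctx {
	uint16_t total_expected_size;
	uint8_t expected_frag_cnt;
	uint8_t curr_frag_cnt;
};

/* Record one received fragment; return true once the whole packet is in. */
static bool rx_ctx_add_frag(struct rx_ctx *ctx, uint16_t frag_len)
{
	ctx->total_expected_size += frag_len;
	ctx->curr_frag_cnt++;
	return ctx->curr_frag_cnt == ctx->expected_frag_cnt;
}

int main(void)
{
	/* Pretend the first descriptor announced a 3-fragment packet. */
	struct rx_ctx ctx = { .expected_frag_cnt = 3 };
	const uint16_t frag_lens[] = { 2048, 2048, 1000 };

	for (unsigned int i = 0; i < 3; i++) {
		if (rx_ctx_add_frag(&ctx, frag_lens[i])) {
			printf("packet complete: %u bytes in %u frags\n",
			       (unsigned)ctx.total_expected_size,
			       (unsigned)ctx.curr_frag_cnt);
			/* Ready for the next packet on this ring. */
			memset(&ctx, 0, sizeof(ctx));
		}
	}
	return 0;
}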
@@ -38,7 +38,8 @@ void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option *option,
 			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
-			     struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
 	u16 option_length = be16_to_cpu(option->option_length);
@@ -111,6 +112,24 @@ void gve_parse_device_option(struct gve_priv *priv,
 		}
 		*dev_op_dqo_rda = (void *)(option + 1);
 		break;
+	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
+		if (option_length < sizeof(**dev_op_jumbo_frames) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "Jumbo Frames",
+				 (int)sizeof(**dev_op_jumbo_frames),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_jumbo_frames)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
+				 "Jumbo Frames");
+		}
+		*dev_op_jumbo_frames = (void *)(option + 1);
+		break;
 	default:
 		/* If we don't recognize the option just continue
 		 * without doing anything.
@@ -126,7 +145,8 @@ gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_descriptor *descriptor,
 			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
-			   struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
 	struct gve_device_option *dev_opt;
@@ -146,7 +166,7 @@ gve_process_device_options(struct gve_priv *priv,
 		gve_parse_device_option(priv, descriptor, dev_opt,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
-					dev_op_dqo_rda);
+					dev_op_dqo_rda, dev_op_jumbo_frames);
 		dev_opt = next_opt;
 	}
@@ -530,6 +550,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 			cpu_to_be64(rx->data.data_bus),
 		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
 		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
 	} else {
 		cmd.create_rx_queue.rx_ring_size =
 			cpu_to_be16(priv->rx_desc_cnt);
@@ -660,12 +681,31 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 	return 0;
 }
 
+static void gve_enable_supported_features(struct gve_priv *priv,
+					  u32 supported_features_mask,
+					  const struct gve_device_option_jumbo_frames
+						  *dev_op_jumbo_frames)
+{
+	/* Before control reaches this point, the page-size-capped max MTU from
+	 * the gve_device_descriptor field has already been stored in
+	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
+	 */
+	if (dev_op_jumbo_frames &&
+	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
+		dev_info(&priv->pdev->dev,
+			 "JUMBO FRAMES device option enabled.\n");
+		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
+	}
+}
+
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
 	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
 	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
 	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
 	struct gve_device_descriptor *descriptor;
+	u32 supported_features_mask = 0;
 	union gve_adminq_command cmd;
 	dma_addr_t descriptor_bus;
 	int err = 0;
@@ -689,7 +729,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 		goto free_device_descriptor;
 
 	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
-					 &dev_op_gqi_qpl, &dev_op_dqo_rda);
+					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
+					 &dev_op_jumbo_frames);
 	if (err)
 		goto free_device_descriptor;
@@ -704,12 +745,19 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 		priv->queue_format = GVE_DQO_RDA_FORMAT;
 		dev_info(&priv->pdev->dev,
 			 "Driver is running with DQO RDA queue format.\n");
+		supported_features_mask =
+			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
 	} else if (dev_op_gqi_rda) {
 		priv->queue_format = GVE_GQI_RDA_FORMAT;
 		dev_info(&priv->pdev->dev,
 			 "Driver is running with GQI RDA queue format.\n");
+		supported_features_mask =
+			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
 	} else {
 		priv->queue_format = GVE_GQI_QPL_FORMAT;
+		if (dev_op_gqi_qpl)
+			supported_features_mask =
+				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
 		dev_info(&priv->pdev->dev,
 			 "Driver is running with GQI QPL queue format.\n");
 	}
@@ -746,6 +794,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	}
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
+	gve_enable_supported_features(priv, supported_features_mask,
+				      dev_op_jumbo_frames);
+
 free_device_descriptor:
 	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
 			  descriptor_bus);
...
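The parsing added above hangs off a TLV walk: gve_process_device_options() steps through num_device_options entries, each a small big-endian header followed by option_length bytes of payload, and hands each entry to gve_parse_device_option(). The loop body and next_opt computation are only partially shown in the hunks, so the following is a hedged user-space sketch of that style of walk over a raw byte buffer; the 8-byte header layout is inferred from the fields the diff reads, and the helper names are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Option ID from the diff; only jumbo frames is handled here. */
#define OPT_ID_JUMBO_FRAMES 0x8

static uint16_t rd_be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
static uint32_t rd_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

/* Walk num_options TLV entries: an assumed 8-byte header (option_id,
 * option_length, required_features_mask, all big-endian) followed by
 * option_length payload bytes.  Returns the advertised jumbo max_mtu,
 * or 0 if the option is absent.
 */
static uint16_t find_jumbo_mtu(const uint8_t *buf, size_t len, int num_options)
{
	size_t off = 0;

	for (int i = 0; i < num_options; i++) {
		if (off + 8 > len)
			break;
		uint16_t id = rd_be16(buf + off);
		uint16_t opt_len = rd_be16(buf + off + 2);

		if (off + 8 + opt_len > len)
			break;
		if (id == OPT_ID_JUMBO_FRAMES && opt_len >= 8) {
			/* Payload: __be32 supported_features_mask, __be16 max_mtu. */
			(void)rd_be32(buf + off + 8);
			return rd_be16(buf + off + 8 + 4);
		}
		off += 8 + opt_len;
	}
	return 0;
}

int main(void)
{
	/* One jumbo-frames option: id 0x8, length 8, required mask 0,
	 * payload = supported mask 0x4 (bit 2), max_mtu 9000 (0x2328), padding.
	 */
	const uint8_t buf[] = {
		0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x04, 0x23, 0x28, 0x00, 0x00,
	};

	printf("jumbo max_mtu = %u\n",
	       (unsigned)find_jumbo_mtu(buf, sizeof(buf), 1));
	return 0;
}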
@@ -108,6 +108,14 @@ struct gve_device_option_dqo_rda {
 static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
 
+struct gve_device_option_jumbo_frames {
+	__be32 supported_features_mask;
+	__be16 max_mtu;
+	u8 padding[2];
+};
+
+static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -121,6 +129,7 @@ enum gve_dev_opt_id {
 	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
 	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
 	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
 };
 
 enum gve_dev_opt_req_feat_mask {
@@ -128,6 +137,11 @@ enum gve_dev_opt_req_feat_mask {
 	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+};
+
+enum gve_sup_feature_mask {
+	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
 };
 
 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
...
@@ -90,12 +90,13 @@ union gve_rx_data_slot {
 /* GVE Recive Packet Descriptor Flags */
 #define GVE_RXFLG(x)		cpu_to_be16(1 << (3 + (x)))
 #define GVE_RXF_FRAG		GVE_RXFLG(3)	/* IP Fragment */
 #define GVE_RXF_IPV4		GVE_RXFLG(4)	/* IPv4 */
 #define GVE_RXF_IPV6		GVE_RXFLG(5)	/* IPv6 */
 #define GVE_RXF_TCP		GVE_RXFLG(6)	/* TCP Packet */
 #define GVE_RXF_UDP		GVE_RXFLG(7)	/* UDP Packet */
 #define GVE_RXF_ERR		GVE_RXFLG(8)	/* Packet Error Detected */
+#define GVE_RXF_PKT_CONT	GVE_RXFLG(10)	/* Multi Fragment RX packet */
 
 /* GVE IRQ */
 #define GVE_IRQ_ACK	BIT(31)
...
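GVE_RXFLG(x) above places flag x at bit (3 + x) of a big-endian 16-bit field, so the new GVE_RXF_PKT_CONT lands at bit 13 and can be tested directly against the descriptor's stored flags without byte-swapping the whole field. A small user-space check of that arithmetic, with htons standing in for cpu_to_be16 and simplified macro names:

#include <arpa/inet.h>	/* htons as a stand-in for cpu_to_be16 */
#include <stdint.h>
#include <stdio.h>

#define RXFLG(x)	htons((uint16_t)(1u << (3 + (x))))
#define RXF_ERR		RXFLG(8)
#define RXF_PKT_CONT	RXFLG(10)	/* multi-fragment RX packet continues */

int main(void)
{
	/* Pretend the descriptor flags arrived with only the continuation
	 * bit set: bit 13 == 0x2000 in host order, stored big-endian.
	 */
	uint16_t flags_be = htons(0x2000);

	printf("PKT_CONT set: %s\n", (flags_be & RXF_PKT_CONT) ? "yes" : "no");
	printf("ERR set:      %s\n", (flags_be & RXF_ERR) ? "yes" : "no");
	return 0;
}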
@@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
 static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
 	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
 	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
 	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
 	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
 						       start));
 			data[i++] = tmp_rx_bytes;
+			data[i++] = rx->rx_cont_packet_cnt;
+			data[i++] = rx->rx_frag_flip_cnt;
+			data[i++] = rx->rx_frag_copy_cnt;
 			/* rx dropped packets */
 			data[i++] = tmp_rx_skb_alloc_fail +
 				tmp_rx_buf_alloc_fail +
...
@@ -1371,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 			"Could not get device information: err=%d\n", err);
 		goto err;
 	}
-	if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
-		priv->dev->max_mtu = PAGE_SIZE;
-		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
-		if (err) {
-			dev_err(&priv->pdev->dev, "Could not set mtu");
-			goto err;
-		}
-	}
 	priv->dev->mtu = priv->dev->max_mtu;
 	num_ntfy = pci_msix_vec_count(priv->pdev);
 	if (num_ntfy <= 0) {
...
[One file's diff is collapsed on the original page and not shown here.]
@@ -240,8 +240,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	rx->dqo.bufq.mask = buffer_queue_slots - 1;
 	rx->dqo.complq.num_free_slots = completion_queue_slots;
 	rx->dqo.complq.mask = completion_queue_slots - 1;
-	rx->skb_head = NULL;
-	rx->skb_tail = NULL;
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
 
 	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -467,12 +467,12 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
 static void gve_rx_free_skb(struct gve_rx_ring *rx)
 {
-	if (!rx->skb_head)
+	if (!rx->ctx.skb_head)
 		return;
 
-	dev_kfree_skb_any(rx->skb_head);
-	rx->skb_head = NULL;
-	rx->skb_tail = NULL;
+	dev_kfree_skb_any(rx->ctx.skb_head);
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
 }
 
 /* Chains multi skbs for single rx packet.
@@ -483,7 +483,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 			       u16 buf_len, struct gve_rx_ring *rx,
 			       struct gve_priv *priv)
 {
-	int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
 
 	if (unlikely(num_frags == MAX_SKB_FRAGS)) {
 		struct sk_buff *skb;
@@ -492,17 +492,17 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 		if (!skb)
 			return -1;
 
-		skb_shinfo(rx->skb_tail)->frag_list = skb;
-		rx->skb_tail = skb;
+		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+		rx->ctx.skb_tail = skb;
 		num_frags = 0;
 	}
-	if (rx->skb_tail != rx->skb_head) {
-		rx->skb_head->len += buf_len;
-		rx->skb_head->data_len += buf_len;
-		rx->skb_head->truesize += priv->data_buffer_size_dqo;
+	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
+		rx->ctx.skb_head->len += buf_len;
+		rx->ctx.skb_head->data_len += buf_len;
+		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
 	}
 
-	skb_add_rx_frag(rx->skb_tail, num_frags,
+	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 			buf_state->page_info.page,
 			buf_state->page_info.page_offset,
 			buf_len, priv->data_buffer_size_dqo);
@@ -556,7 +556,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 				      buf_len, DMA_FROM_DEVICE);
 
 	/* Append to current skb if one exists. */
-	if (rx->skb_head) {
+	if (rx->ctx.skb_head) {
 		if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
 						 priv)) != 0) {
 			goto error;
@@ -567,11 +567,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	}
 
 	if (eop && buf_len <= priv->rx_copybreak) {
-		rx->skb_head = gve_rx_copy(priv->dev, napi,
-					   &buf_state->page_info, buf_len, 0);
-		if (unlikely(!rx->skb_head))
+		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
+					       &buf_state->page_info, buf_len, 0, NULL);
+		if (unlikely(!rx->ctx.skb_head))
 			goto error;
-		rx->skb_tail = rx->skb_head;
+		rx->ctx.skb_tail = rx->ctx.skb_head;
 
 		u64_stats_update_begin(&rx->statss);
 		rx->rx_copied_pkt++;
@@ -583,12 +583,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		return 0;
 	}
 
-	rx->skb_head = napi_get_frags(napi);
-	if (unlikely(!rx->skb_head))
+	rx->ctx.skb_head = napi_get_frags(napi);
+	if (unlikely(!rx->ctx.skb_head))
 		goto error;
-	rx->skb_tail = rx->skb_head;
+	rx->ctx.skb_tail = rx->ctx.skb_head;
 
-	skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
 			buf_state->page_info.page_offset, buf_len,
 			priv->data_buffer_size_dqo);
 	gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -635,27 +635,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
 		rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
 	int err;
 
-	skb_record_rx_queue(rx->skb_head, rx->q_num);
+	skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);
 
 	if (feat & NETIF_F_RXHASH)
-		gve_rx_skb_hash(rx->skb_head, desc, ptype);
+		gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);
 
 	if (feat & NETIF_F_RXCSUM)
-		gve_rx_skb_csum(rx->skb_head, desc, ptype);
+		gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
 
 	/* RSC packets must set gso_size otherwise the TCP stack will complain
 	 * that packets are larger than MTU.
 	 */
 	if (desc->rsc) {
-		err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+		err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
 		if (err < 0)
 			return err;
 	}
 
-	if (skb_headlen(rx->skb_head) == 0)
+	if (skb_headlen(rx->ctx.skb_head) == 0)
 		napi_gro_frags(napi);
 	else
-		napi_gro_receive(napi, rx->skb_head);
+		napi_gro_receive(napi, rx->ctx.skb_head);
 
 	return 0;
 }
@@ -717,18 +717,18 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		/* Free running counter of completed descriptors */
 		rx->cnt++;
 
-		if (!rx->skb_head)
+		if (!rx->ctx.skb_head)
 			continue;
 
 		if (!compl_desc->end_of_packet)
 			continue;
 
 		work_done++;
-		pkt_bytes = rx->skb_head->len;
+		pkt_bytes = rx->ctx.skb_head->len;
 
 		/* The ethernet header (first ETH_HLEN bytes) is snipped off
 		 * by eth_type_trans.
 		 */
-		if (skb_headlen(rx->skb_head))
+		if (skb_headlen(rx->ctx.skb_head))
 			pkt_bytes += ETH_HLEN;
 
 		/* gve_rx_complete_skb() will consume skb if successful */
@@ -741,8 +741,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		}
 
 		bytes += pkt_bytes;
-		rx->skb_head = NULL;
-		rx->skb_tail = NULL;
+		rx->ctx.skb_head = NULL;
+		rx->ctx.skb_tail = NULL;
 	}
 
 	gve_rx_post_buffers_dqo(rx);
...
@@ -50,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
 			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 pad)
+			    u16 padding, struct gve_rx_ctx *ctx)
 {
-	struct sk_buff *skb = napi_alloc_skb(napi, len);
-	void *va = page_info->page_address + pad +
-		   page_info->page_offset;
+	void *va = page_info->page_address + padding + page_info->page_offset;
+	int skb_linear_offset = 0;
+	bool set_protocol = false;
+	struct sk_buff *skb;
 
-	if (unlikely(!skb))
-		return NULL;
+	if (ctx) {
+		if (!ctx->skb_head)
+			ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
+
+		if (unlikely(!ctx->skb_head))
+			return NULL;
+		skb = ctx->skb_head;
+		skb_linear_offset = skb->len;
+		set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
+	} else {
+		skb = napi_alloc_skb(napi, len);
+		set_protocol = true;
+	}
 	__skb_put(skb, len);
+	skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
 
-	skb_copy_to_linear_data(skb, va, len);
+	if (set_protocol)
+		skb->protocol = eth_type_trans(skb, dev);
 
-	skb->protocol = eth_type_trans(skb, dev);
 	return skb;
 }
...
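The reworked gve_rx_copy() above lets a caller that passes a ctx copy each fragment of a multi-descriptor packet into one linear skb at an increasing offset, deferring eth_type_trans() to the final fragment so the protocol is resolved exactly once. A self-contained sketch of that shape, with a plain byte buffer standing in for the skb and a boolean standing in for the deferred protocol step:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the rx context used by the copy path. */
struct copy_ctx {
	uint8_t buf[4096];		/* plays the role of the linear skb data */
	size_t len;			/* plays the role of skb->len */
	uint8_t expected_frag_cnt;
	uint8_t curr_frag_cnt;
	bool protocol_set;		/* set once, on the final fragment */
};

/* Copy one fragment at the current linear offset; finalize on the last one. */
static void copy_frag(struct copy_ctx *ctx, const void *frag, size_t frag_len)
{
	memcpy(ctx->buf + ctx->len, frag, frag_len);
	ctx->len += frag_len;

	if (ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1)
		ctx->protocol_set = true;	/* eth_type_trans() equivalent */
	ctx->curr_frag_cnt++;
}

int main(void)
{
	struct copy_ctx ctx = { .expected_frag_cnt = 3 };
	const char *frags[] = { "aaaa", "bbbb", "cc" };

	for (int i = 0; i < 3; i++)
		copy_frag(&ctx, frags[i], strlen(frags[i]));

	printf("assembled %zu bytes, protocol set: %s\n",
	       ctx.len, ctx.protocol_set ? "yes" : "no");
	return 0;
}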
@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
 			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 pad);
+			    u16 pad, struct gve_rx_ctx *ctx);
 
 /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
 void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
...