Commit da7d4b42 authored by John Fraker, committed by Jakub Kicinski

gve: Remove dependency on 4k page size.

Prior to this change, gve crashes when attempting to run on kernels with
page sizes other than 4k. This change removes unnecessary references to
PAGE_SIZE and replaces them with more meaningful constants.
Signed-off-by: Jordan Kimbrough <jrkim@google.com>
Signed-off-by: John Fraker <jfraker@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20231128002648.320892-6-jfraker@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 513072fb
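The fix centers on the RX page-flipping scheme: each page holds two 2048-byte packet buffers, so the flip offset must be the fixed buffer stride rather than PAGE_SIZE / 2, which on a 64k-page kernel would be 32768 and point far past the second buffer. Below is a minimal userspace sketch of the page-size-independent flip the patch adopts; demo_flip() is a hypothetical name for illustration, not a driver function.

#include <stdio.h>

#define GVE_DEFAULT_RX_BUFFER_SIZE   2048
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

/* Toggle between the two fixed 2048-byte buffers at the start of
 * a page; the result no longer depends on the kernel's PAGE_SIZE.
 */
static unsigned long demo_flip(unsigned long page_offset)
{
	return page_offset ^ GVE_DEFAULT_RX_BUFFER_OFFSET;
}

int main(void)
{
	unsigned long off = 0;

	off = demo_flip(off);		/* 0 -> 2048 */
	printf("offset: %lu\n", off);
	off = demo_flip(off);		/* 2048 -> 0 */
	printf("offset: %lu\n", off);
	return 0;
}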
@@ -49,7 +49,9 @@
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES 1024

-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

 #define GVE_XDP_ACTIONS 5
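The patch introduces two constants with the same value because they answer different questions: GVE_DEFAULT_RX_BUFFER_SIZE is how large each packet buffer is, while GVE_DEFAULT_RX_BUFFER_OFFSET is the stride between the two buffers sharing a page. A hypothetical compile-time check, not part of the patch, makes the implied invariants explicit:

#include <assert.h>

#define GVE_DEFAULT_RX_BUFFER_SIZE   2048
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

/* Hypothetical checks, not in the patch: the flip scheme assumes
 * two adjacent, non-overlapping buffers that fit within the first
 * 4k of a page on every supported page size.
 */
static_assert(GVE_DEFAULT_RX_BUFFER_OFFSET >= GVE_DEFAULT_RX_BUFFER_SIZE,
	      "buffers must not overlap when flipped");
static_assert(GVE_DEFAULT_RX_BUFFER_OFFSET + GVE_DEFAULT_RX_BUFFER_SIZE <= 4096,
	      "both buffers must fit in the smallest supported page");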
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
 	case ETHTOOL_RX_COPYBREAK:
 	{
 		u32 max_copybreak = gve_is_gqi(priv) ?
-				(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+				GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

 		len = *(u32 *)value;
 		if (len > max_copybreak)
@@ -1328,7 +1328,7 @@ static int gve_open(struct net_device *dev)
 		/* Hard code this for now. This may be tuned in the future for
 		 * performance.
 		 */
-		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+		priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
 	}
 	err = gve_create_rings(priv);
 	if (err)
@@ -1664,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
-	if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
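With the constant in place, the XDP MTU ceiling is identical on every architecture: 2048 minus the Ethernet header and the RX pad. A small arithmetic check, assuming GVE_RX_PAD is 2 as defined in gve.h:

#include <stdio.h>

#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_RX_PAD 2	/* assumed value from gve.h */
#define ETH_HLEN 14	/* sizeof(struct ethhdr) */

int main(void)
{
	/* 2048 - 14 - 2 = 2032; previously (PAGE_SIZE / 2) - 14 - 2,
	 * which matches only on 4k-page kernels.
	 */
	printf("max XDP mtu: %d\n",
	       GVE_DEFAULT_RX_BUFFER_SIZE - ETH_HLEN - GVE_RX_PAD);
	return 0;
}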
@@ -283,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 	/* Allocating half-page buffers allows page-flipping which is faster
 	 * than copying or allocating new pages.
 	 */
-	rx->packet_buffer_size = PAGE_SIZE / 2;
+	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 	gve_rx_add_to_block(priv, idx);
@@ -399,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
 static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
 {
-	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+	const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);

 	/* "flip" to other packet buffer on this page */
-	page_info->page_offset ^= PAGE_SIZE / 2;
+	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 	*(slot_addr) ^= offset;
 }
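gve_rx_flip_buff() toggles both the CPU-side page_offset and the big-endian slot address the device reads. XOR-ing the stored big-endian value with cpu_to_be64(2048) works because byte swapping distributes over XOR. A userspace sketch of that property, using __builtin_bswap64 as a stand-in for the kernel's endianness helpers on a little-endian host:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_addr = 0x100000;			/* example slot address */
	uint64_t be_slot = __builtin_bswap64(dma_addr);	/* device-visible form */

	/* Flipping the big-endian slot by cpu_to_be64(2048) flips the
	 * address itself, since bswap(a) ^ bswap(b) == bswap(a ^ b).
	 */
	be_slot ^= __builtin_bswap64(2048);
	printf("flipped: %#llx\n",
	       (unsigned long long)__builtin_bswap64(be_slot));	/* 0x100800 */
	return 0;
}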
@@ -507,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 			return NULL;

 		gve_dec_pagecnt_bias(copy_page_info);
-		copy_page_info->page_offset += rx->packet_buffer_size;
-		copy_page_info->page_offset &= (PAGE_SIZE - 1);
+		copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;

 		if (copy_page_info->can_flip) {
 			/* We have used both halves of this copy page, it
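The replaced pair of statements only emulated an XOR by accident of the 4k page size: adding 2048 and masking with PAGE_SIZE - 1 wraps back to 0 exactly when two buffers fill the whole page. On a 64k-page kernel the mask would let the offset march past the two-buffer region instead of wrapping. A minimal demonstration of the divergence, assuming a 64k PAGE_SIZE:

#include <stdio.h>

/* Hypothetical comparison of the old and new offset updates,
 * with PAGE_SIZE fixed at 64k to show where they diverge.
 */
#define PAGE_SIZE 65536
#define BUF 2048

int main(void)
{
	unsigned long old_off = 2048, new_off = 2048;

	/* Old scheme: walks through 32 slots before wrapping. */
	old_off += BUF;
	old_off &= (PAGE_SIZE - 1);	/* 4096, past the two-buffer area */

	/* New scheme: always toggles between the two 2048-byte halves. */
	new_off ^= BUF;			/* back to 0 */

	printf("old=%lu new=%lu\n", old_off, new_off);
	return 0;
}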
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return 0;
 }

-#define GVE_TX_START_THRESH	PAGE_SIZE
+#define GVE_TX_START_THRESH	4096

 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 			     u32 to_do, bool try_to_wake)
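GVE_TX_START_THRESH used PAGE_SIZE only as a convenient name for 4096 bytes; on a 64k-page kernel the stopped queue would not be re-woken until 16 times as much descriptor space was free. A hypothetical sketch of a fixed byte-based wake check, not the driver's actual logic:

#include <stdbool.h>
#include <stdio.h>

#define GVE_TX_START_THRESH 4096

/* Hypothetical illustration: restart a stopped queue once a fixed
 * number of bytes is free, independent of the kernel's page size.
 */
static bool demo_should_wake(unsigned int free_bytes)
{
	return free_bytes >= GVE_TX_START_THRESH;
}

int main(void)
{
	printf("%d %d\n", demo_should_wake(2048), demo_should_wake(8192));
	return 0;
}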