Commit 6a55ca6b authored by Eric Dumazet, committed by Jakub Kicinski

udp: move udpv4_offload and udpv6_offload to net_hotdata

These structures are used in GRO and GSO paths.
Move them to net_hotdata for better cache locality.

v2: udpv6_offload definition depends on CONFIG_INET=y
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240306160031.874438-12-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent aa70d2d1
...@@ -11,8 +11,10 @@ struct net_hotdata { ...@@ -11,8 +11,10 @@ struct net_hotdata {
#if IS_ENABLED(CONFIG_INET) #if IS_ENABLED(CONFIG_INET)
struct packet_offload ip_packet_offload; struct packet_offload ip_packet_offload;
struct net_offload tcpv4_offload; struct net_offload tcpv4_offload;
struct net_offload udpv4_offload;
struct packet_offload ipv6_packet_offload; struct packet_offload ipv6_packet_offload;
struct net_offload tcpv6_offload; struct net_offload tcpv6_offload;
struct net_offload udpv6_offload;
#endif #endif
struct list_head offload_base; struct list_head offload_base;
struct list_head ptype_all; struct list_head ptype_all;
......
...@@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb); return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
} }
static const struct net_offload udpv4_offload = {
.callbacks = {
.gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
};
int __init udpv4_offload_init(void) int __init udpv4_offload_init(void)
{ {
return inet_add_offload(&udpv4_offload, IPPROTO_UDP); net_hotdata.udpv4_offload = (struct net_offload) {
.callbacks = {
.gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
};
return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
} }
...@@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) ...@@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb); return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
} }
static const struct net_offload udpv6_offload = { int __init udpv6_offload_init(void)
.callbacks = {
.gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
};
int udpv6_offload_init(void)
{ {
return inet6_add_offload(&udpv6_offload, IPPROTO_UDP); net_hotdata.udpv6_offload = (struct net_offload) {
.callbacks = {
.gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
};
return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
} }
int udpv6_offload_exit(void) int udpv6_offload_exit(void)
{ {
return inet6_del_offload(&udpv6_offload, IPPROTO_UDP); return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment