Commit 5428aef8 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next
tree. Basically, improvements to the packet rejection infrastructure,
deprecation of CLUSTERIP, cleanups for nf_tables and some untangling of
br_netfilter. More specifically, they are:

1) Send the packet to reset the flow only if the original packet's checksum
   is valid, from Florian Westphal (a condensed sketch of this checksum
   guard follows the commit metadata below).

2) Fix the nf_tables bridge reject expression when used from the input
   chain, also from Florian.

3) Deprecate the CLUSTERIP target; the cluster match supersedes it in
   functionality and CLUSTERIP is known to have problems.

4) A couple of cleanups for the nf_tables rule tracing infrastructure, from
   Patrick McHardy.

5) Another cleanup to place transaction declarations at the bottom of
   nf_tables.h, also from Patrick.

6) Consolidate Kconfig dependencies wrt. NF_TABLES.

7) Limit table names to 32 bytes in nf_tables.

8) MAC header copying in bridge netfilter is only required when calling
   ip_fragment(), so restrict it to that path, from Florian Westphal.

9) Move nf_bridge_update_protocol() to br_netfilter.c, also from
   Florian.

10) Small refactoring of the br_netfilter transmission path, again from
    Florian.

11) Move br_nf_pre_routing_finish_bridge_slow() to br_netfilter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 26c459a8 e5de75bf
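Before the diff itself, a brief orientation on items 1 and 2: the core of the rejection-infrastructure rework is a checksum guard that runs before any ICMP error or TCP reset is emitted, so packets with forged or corrupted checksums can no longer trigger replies. The snippet below is a condensed, illustrative sketch of the IPv4 case only; the helper name is invented for illustration, the usual kernel skb/IP/netfilter headers are assumed, and the authoritative code is the nf_send_unreach() and reject6_csum_ok() hunks further down.

/* Condensed sketch (not literally part of this commit): reply with an ICMP
 * error or TCP reset only when the offending IPv4 packet's checksum is
 * known to be good.
 */
static bool reject_csum_ok_v4(struct sk_buff *skb, int hook)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 proto = 0;

	/* Already flagged bad, or a non-first fragment: never reply. */
	if (skb->csum_bad || iph->frag_off & htons(IP_OFFSET))
		return false;

	/* The checksum was already verified (e.g. by hardware offload). */
	if (skb_csum_unnecessary(skb))
		return true;

	/* As in the real helper: pass the protocol for TCP/UDP, 0 otherwise. */
	if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
		proto = iph->protocol;

	return nf_ip_checksum(skb, hook, ip_hdrlen(skb), proto) == 0;
}

The IPv6 side follows the same idea but additionally skips extension headers with ipv6_skip_exthdr() and refuses to reply to fragments, as shown in reject6_csum_ok() below.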
@@ -36,44 +36,6 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
}
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
if (skb->nf_bridge->mask & BRNF_8021Q)
skb->protocol = htons(ETH_P_8021Q);
else if (skb->nf_bridge->mask & BRNF_PPPoE)
skb->protocol = htons(ETH_P_PPP_SES);
}
/* Fill in the header for fragmented IP packets handled by
* the IPv4 connection tracking code.
*
* Only used in br_forward.c
*/
static inline int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
unsigned int header_size;
nf_bridge_update_protocol(skb);
header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return err;
skb_copy_to_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
__skb_push(skb, nf_bridge_encap_header_len(skb));
return 0;
}
static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
if (skb->nf_bridge &&
skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
return nf_bridge_copy_header(skb);
return 0;
}
static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
@@ -82,18 +44,6 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
}
int br_handle_frame_finish(struct sk_buff *skb);
/* Only used in br_device.c */
static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
skb_pull(skb, ETH_HLEN);
nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
skb->dev = nf_bridge->physindev;
return br_handle_frame_finish(skb);
}
/* This is called by the IP fragmenting code and it ensures there is
* enough room for the encapsulating header (if there is one). */
@@ -119,7 +69,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
}
#else
#define nf_bridge_maybe_copy_header(skb) (0)
#define nf_bridge_pad(skb) (0)
#define br_drop_fake_rtable(skb) do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */
@@ -5,11 +5,7 @@
#include <net/ip.h>
#include <net/icmp.h>
static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
{
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct sk_buff *oldskb, int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
@@ -3,15 +3,8 @@
#include <linux/icmpv6.h>
static inline void
nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
unsigned int hooknum)
{
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
@@ -393,74 +393,6 @@ struct nft_rule {
__attribute__((aligned(__alignof__(struct nft_expr))));
};
/**
* struct nft_trans - nf_tables object update in transaction
*
* @list: used internally
* @msg_type: message type
* @ctx: transaction context
* @data: internal information related to the transaction
*/
struct nft_trans {
struct list_head list;
int msg_type;
struct nft_ctx ctx;
char data[0];
};
struct nft_trans_rule {
struct nft_rule *rule;
};
#define nft_trans_rule(trans) \
(((struct nft_trans_rule *)trans->data)->rule)
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
};
#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
struct nft_trans_chain {
bool update;
char name[NFT_CHAIN_MAXNAMELEN];
struct nft_stats __percpu *stats;
u8 policy;
};
#define nft_trans_chain_update(trans) \
(((struct nft_trans_chain *)trans->data)->update)
#define nft_trans_chain_name(trans) \
(((struct nft_trans_chain *)trans->data)->name)
#define nft_trans_chain_stats(trans) \
(((struct nft_trans_chain *)trans->data)->stats)
#define nft_trans_chain_policy(trans) \
(((struct nft_trans_chain *)trans->data)->policy)
struct nft_trans_table {
bool update;
bool enable;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
#define nft_trans_table_enable(trans) \
(((struct nft_trans_table *)trans->data)->enable)
struct nft_trans_elem {
struct nft_set *set;
struct nft_set_elem elem;
};
#define nft_trans_elem_set(trans) \
(((struct nft_trans_elem *)trans->data)->set)
#define nft_trans_elem(trans) \
(((struct nft_trans_elem *)trans->data)->elem)
static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
{
return (struct nft_expr *)&rule->data[0];
@@ -528,6 +460,25 @@ enum nft_chain_type {
NFT_CHAIN_T_MAX
};
/**
* struct nf_chain_type - nf_tables chain type info
*
* @name: name of the type
* @type: numeric identifier
* @family: address family
* @owner: module owner
* @hook_mask: mask of valid hooks
* @hooks: hookfn overrides
*/
struct nf_chain_type {
const char *name;
enum nft_chain_type type;
int family;
struct module *owner;
unsigned int hook_mask;
nf_hookfn *hooks[NF_MAX_HOOKS];
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_type type);
int nft_chain_validate_hooks(const struct nft_chain *chain,
@@ -584,7 +535,7 @@ struct nft_table {
u64 hgenerator;
u32 use;
u16 flags;
char name[];
char name[NFT_TABLE_MAXNAMELEN];
};
/**
@@ -614,25 +565,6 @@ struct nft_af_info {
int nft_register_afinfo(struct net *, struct nft_af_info *);
void nft_unregister_afinfo(struct nft_af_info *);
/**
* struct nf_chain_type - nf_tables chain type info
*
* @name: name of the type
* @type: numeric identifier
* @family: address family
* @owner: module owner
* @hook_mask: mask of valid hooks
* @hooks: hookfn overrides
*/
struct nf_chain_type {
const char *name;
enum nft_chain_type type;
int family;
struct module *owner;
unsigned int hook_mask;
nf_hookfn *hooks[NF_MAX_HOOKS];
};
int nft_register_chain_type(const struct nf_chain_type *);
void nft_unregister_chain_type(const struct nf_chain_type *);
@@ -657,4 +589,72 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_SET() \
MODULE_ALIAS("nft-set")
/**
* struct nft_trans - nf_tables object update in transaction
*
* @list: used internally
* @msg_type: message type
* @ctx: transaction context
* @data: internal information related to the transaction
*/
struct nft_trans {
struct list_head list;
int msg_type;
struct nft_ctx ctx;
char data[0];
};
struct nft_trans_rule {
struct nft_rule *rule;
};
#define nft_trans_rule(trans) \
(((struct nft_trans_rule *)trans->data)->rule)
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
};
#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
struct nft_trans_chain {
bool update;
char name[NFT_CHAIN_MAXNAMELEN];
struct nft_stats __percpu *stats;
u8 policy;
};
#define nft_trans_chain_update(trans) \
(((struct nft_trans_chain *)trans->data)->update)
#define nft_trans_chain_name(trans) \
(((struct nft_trans_chain *)trans->data)->name)
#define nft_trans_chain_stats(trans) \
(((struct nft_trans_chain *)trans->data)->stats)
#define nft_trans_chain_policy(trans) \
(((struct nft_trans_chain *)trans->data)->policy)
struct nft_trans_table {
bool update;
bool enable;
};
#define nft_trans_table_update(trans) \
(((struct nft_trans_table *)trans->data)->update)
#define nft_trans_table_enable(trans) \
(((struct nft_trans_table *)trans->data)->enable)
struct nft_trans_elem {
struct nft_set *set;
struct nft_set_elem elem;
};
#define nft_trans_elem_set(trans) \
(((struct nft_trans_elem *)trans->data)->set)
#define nft_trans_elem(trans) \
(((struct nft_trans_elem *)trans->data)->elem)
#endif /* _NET_NF_TABLES_H */
@@ -9,6 +9,7 @@ struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
bool notrack_deprecated_warning;
bool clusterip_deprecated_warning;
#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
struct ebt_table *broute_table;
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
@@ -36,13 +36,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
u16 vid = 0;
rcu_read_lock();
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
br_nf_pre_routing_finish_bridge_slow(skb);
if (br_nf_prerouting_finish_bridge(skb)) {
rcu_read_unlock();
return NETDEV_TX_OK;
}
#endif
u64_stats_update_begin(&brstats->syncp);
brstats->tx_packets++;
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* ip_fragment doesn't copy the MAC header */
if (nf_bridge_maybe_copy_header(skb) ||
!is_skb_forwardable(skb->dev, skb)) {
if (!is_skb_forwardable(skb->dev, skb)) {
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
@@ -239,6 +239,14 @@ static int br_parse_ip_options(struct sk_buff *skb)
return -1;
}
static void nf_bridge_update_protocol(struct sk_buff *skb)
{
if (skb->nf_bridge->mask & BRNF_8021Q)
skb->protocol = htons(ETH_P_8021Q);
else if (skb->nf_bridge->mask & BRNF_PPPoE)
skb->protocol = htons(ETH_P_PPP_SES);
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
* bridge PRE_ROUTING hook. */
@@ -764,23 +772,53 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
}
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
static bool nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
unsigned int header_size;
nf_bridge_update_protocol(skb);
header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return false;
skb_copy_to_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
__skb_push(skb, nf_bridge_encap_header_len(skb));
return true;
}
static int br_nf_push_frag_xmit(struct sk_buff *skb)
{
if (!nf_bridge_copy_header(skb)) {
kfree_skb(skb);
return 0;
}
return br_dev_queue_push_xmit(skb);
}
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
int ret;
int frag_max_size;
unsigned int mtu_reserved;
if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
return br_dev_queue_push_xmit(skb);
mtu_reserved = nf_bridge_mtu_reduction(skb);
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
if (skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb)) {
if (skb->len + mtu_reserved > skb->dev->mtu) {
frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
if (br_parse_ip_options(skb))
/* Drop invalid packet */
return NF_DROP;
IPCB(skb)->frag_max_size = frag_max_size;
ret = ip_fragment(skb, br_dev_queue_push_xmit);
ret = ip_fragment(skb, br_nf_push_frag_xmit);
} else
ret = br_dev_queue_push_xmit(skb);
@@ -854,6 +892,38 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
return NF_ACCEPT;
}
/* This is called when br_netfilter has called into iptables/netfilter,
* and DNAT has taken place on a bridge-forwarded packet.
*
* neigh->output has created a new MAC header, with local br0 MAC
* as saddr.
*
* This restores the original MAC saddr of the bridged packet
* before invoking bridge forward logic to transmit the packet.
*/
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
skb_pull(skb, ETH_HLEN);
nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
skb->dev = nf_bridge->physindev;
br_handle_frame_finish(skb);
}
int br_nf_prerouting_finish_bridge(struct sk_buff *skb)
{
if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
br_nf_pre_routing_finish_bridge_slow(skb);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(br_nf_prerouting_finish_bridge);
void br_netfilter_enable(void)
{
}
@@ -765,10 +765,15 @@ static inline int br_vlan_enabled(struct net_bridge *br)
/* br_netfilter.c */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_nf_prerouting_finish_bridge(struct sk_buff *skb);
int br_nf_core_init(void);
void br_nf_core_fini(void);
void br_netfilter_rtable_init(struct net_bridge *);
#else
static inline int br_nf_prerouting_finish_bridge(struct sk_buff *skb)
{
return 0;
}
static inline int br_nf_core_init(void) { return 0; }
static inline void br_nf_core_fini(void) {}
#define br_netfilter_rtable_init(x)
@@ -21,6 +21,7 @@
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include "../br_private.h"
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
@@ -36,7 +37,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
skb_pull(nskb, ETH_HLEN);
}
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
struct iphdr *niph;
@@ -65,11 +71,12 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
br_deliver(br_port_get_rcu(dev), nskb);
}
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
u8 code)
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
@@ -77,8 +84,9 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
unsigned int len;
void *payload;
__wsum csum;
u8 proto;
if (!nft_bridge_iphdr_validate(oldskb))
if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@@ -91,7 +99,17 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
if (!pskb_may_pull(oldskb, len))
return;
if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
if (pskb_trim_rcsum(oldskb, htons(ip_hdr(oldskb)->tot_len)))
return;
if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
ip_hdr(oldskb)->protocol == IPPROTO_UDP)
proto = ip_hdr(oldskb)->protocol;
else
proto = 0;
if (!skb_csum_unnecessary(oldskb) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
@@ -120,11 +138,13 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
br_deliver(br_port_get_rcu(dev), nskb);
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb, int hook)
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
@@ -152,12 +172,37 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
br_deliver(br_port_get_rcu(dev), nskb);
}
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
int thoff;
__be16 fo;
u8 proto = ip6h->nexthdr;
if (skb->csum_bad)
return false;
if (skb_csum_unnecessary(skb))
return true;
if (ip6h->payload_len &&
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb, int hook,
u8 code)
struct sk_buff *oldskb,
const struct net_device *dev,
int hook, u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
@@ -176,6 +221,9 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
if (!pskb_may_pull(oldskb, len))
return;
if (!reject6_br_csum_ok(oldskb, hook))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
@@ -205,7 +253,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
br_deliver(br_port_get_rcu(dev), nskb);
}
static void nft_reject_bridge_eval(const struct nft_expr *expr,
@@ -224,16 +272,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v4_tcp_reset(pkt->skb,
nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
pkt->ops->hooknum,
nft_reject_icmp_code(priv->icmp_code));
break;
@@ -242,16 +290,16 @@
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
pkt->ops->hooknum,
nft_reject_icmpv6_code(priv->icmp_code));
break;
@@ -36,24 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
If unsure, say Y.
config NF_LOG_ARP
tristate "ARP packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
config NF_LOG_IPV4
tristate "IPv4 packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
if NF_TABLES
config NF_TABLES_IPV4
depends on NF_TABLES
tristate "IPv4 nf_tables support"
help
This option enables the IPv4 support for nf_tables.
if NF_TABLES_IPV4
config NFT_CHAIN_ROUTE_IPV4
depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables route chain support"
help
This option enables the "route" chain for IPv4 in nf_tables. This
@@ -61,22 +53,34 @@ config NFT_CHAIN_ROUTE_IPV4
fields such as the source, destination, type of service and
the packet mark.
config NF_REJECT_IPV4
tristate "IPv4 packet rejection"
default m if NETFILTER_ADVANCED=n
config NFT_REJECT_IPV4
depends on NF_TABLES_IPV4
select NF_REJECT_IPV4
default NFT_REJECT
tristate
endif # NF_TABLES_IPV4
config NF_TABLES_ARP
depends on NF_TABLES
tristate "ARP nf_tables support"
help
This option enables the ARP support for nf_tables.
endif # NF_TABLES
config NF_LOG_ARP
tristate "ARP packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
config NF_LOG_IPV4
tristate "IPv4 packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
config NF_REJECT_IPV4
tristate "IPv4 packet rejection"
default m if NETFILTER_ADVANCED=n
config NF_NAT_IPV4
tristate "IPv4 NAT"
depends on NF_CONNTRACK_IPV4
@@ -418,6 +418,13 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
par->family);
if (!par->net->xt.clusterip_deprecated_warning) {
pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
"use xt_cluster instead\n");
par->net->xt.clusterip_deprecated_warning = true;
}
return ret;
}
@@ -34,31 +34,32 @@ static unsigned int
reject_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ipt_reject_info *reject = par->targinfo;
int hook = par->hooknum;
switch (reject->with) {
case IPT_ICMP_NET_UNREACHABLE:
nf_send_unreach(skb, ICMP_NET_UNREACH);
nf_send_unreach(skb, ICMP_NET_UNREACH, hook);
break;
case IPT_ICMP_HOST_UNREACHABLE:
nf_send_unreach(skb, ICMP_HOST_UNREACH);
nf_send_unreach(skb, ICMP_HOST_UNREACH, hook);
break;
case IPT_ICMP_PROT_UNREACHABLE:
nf_send_unreach(skb, ICMP_PROT_UNREACH);
nf_send_unreach(skb, ICMP_PROT_UNREACH, hook);
break;
case IPT_ICMP_PORT_UNREACHABLE:
nf_send_unreach(skb, ICMP_PORT_UNREACH);
nf_send_unreach(skb, ICMP_PORT_UNREACH, hook);
break;
case IPT_ICMP_NET_PROHIBITED:
nf_send_unreach(skb, ICMP_NET_ANO);
nf_send_unreach(skb, ICMP_NET_ANO, hook);
break;
case IPT_ICMP_HOST_PROHIBITED:
nf_send_unreach(skb, ICMP_HOST_ANO);
nf_send_unreach(skb, ICMP_HOST_ANO, hook);
break;
case IPT_ICMP_ADMIN_PROHIBITED:
nf_send_unreach(skb, ICMP_PKT_FILTERED);
nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
break;
case IPT_TCP_RESET:
nf_send_reset(skb, par->hooknum);
nf_send_reset(skb, hook);
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
@@ -164,4 +164,27 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
}
EXPORT_SYMBOL_GPL(nf_send_reset);
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
struct iphdr *iph = ip_hdr(skb_in);
u8 proto;
if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET))
return;
if (skb_csum_unnecessary(skb_in)) {
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
return;
}
if (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)
proto = iph->protocol;
else
proto = 0;
if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);
MODULE_LICENSE("GPL");
......@@ -27,7 +27,8 @@ static void nft_reject_ipv4_eval(const struct nft_expr *expr,
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nf_send_unreach(pkt->skb, priv->icmp_code);
nf_send_unreach(pkt->skb, priv->icmp_code,
pkt->ops->hooknum);
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(pkt->skb, pkt->ops->hooknum);
@@ -25,14 +25,16 @@ config NF_CONNTRACK_IPV6
To compile it as a module, choose M here. If unsure, say N.
if NF_TABLES
config NF_TABLES_IPV6
depends on NF_TABLES
tristate "IPv6 nf_tables support"
help
This option enables the IPv6 support for nf_tables.
if NF_TABLES_IPV6
config NFT_CHAIN_ROUTE_IPV6
depends on NF_TABLES_IPV6
tristate "IPv6 nf_tables route chain support"
help
This option enables the "route" chain for IPv6 in nf_tables. This
@@ -40,16 +42,18 @@ config NFT_CHAIN_ROUTE_IPV6
fields such as the source, destination, flowlabel, hop-limit and
the packet mark.
config NF_REJECT_IPV6
tristate "IPv6 packet rejection"
default m if NETFILTER_ADVANCED=n
config NFT_REJECT_IPV6
depends on NF_TABLES_IPV6
select NF_REJECT_IPV6
default NFT_REJECT
tristate
endif # NF_TABLES_IPV6
endif # NF_TABLES
config NF_REJECT_IPV6
tristate "IPv6 packet rejection"
default m if NETFILTER_ADVANCED=n
config NF_LOG_IPV6
tristate "IPv6 packet logging"
default m if NETFILTER_ADVANCED=n
@@ -208,4 +208,39 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
}
EXPORT_SYMBOL_GPL(nf_send_reset6);
static bool reject6_csum_ok(struct sk_buff *skb, int hook)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
int thoff;
__be16 fo;
u8 proto;
if (skb->csum_bad)
return false;
if (skb_csum_unnecessary(skb))
return true;
proto = ip6h->nexthdr;
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
unsigned char code, unsigned int hooknum)
{
if (!reject6_csum_ok(skb_in, hooknum))
return;
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach6);
MODULE_LICENSE("GPL");
......@@ -438,8 +438,10 @@ config NF_TABLES
To compile it as a module, choose M here.
if NF_TABLES
config NF_TABLES_INET
depends on NF_TABLES && IPV6
depends on IPV6
select NF_TABLES_IPV4
select NF_TABLES_IPV6
tristate "Netfilter nf_tables mixed IPv4/IPv6 tables support"
@@ -447,21 +449,18 @@ config NF_TABLES_INET
This option enables support for a mixed IPv4/IPv6 "inet" table.
config NFT_EXTHDR
depends on NF_TABLES
tristate "Netfilter nf_tables IPv6 exthdr module"
help
This option adds the "exthdr" expression that you can use to match
IPv6 extension headers.
config NFT_META
depends on NF_TABLES
tristate "Netfilter nf_tables meta module"
help
This option adds the "meta" expression that you can use to match and
to set packet metainformation such as the packet mark.
config NFT_CT
depends on NF_TABLES
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
help
@@ -469,42 +468,36 @@ config NFT_CT
connection tracking information such as the flow state.
config NFT_RBTREE
depends on NF_TABLES
tristate "Netfilter nf_tables rbtree set module"
help
This option adds the "rbtree" set type (Red Black tree) that is used
to build interval-based sets.
config NFT_HASH
depends on NF_TABLES
tristate "Netfilter nf_tables hash set module"
help
This option adds the "hash" set type that is used to build one-way
mappings between matchings and actions.
config NFT_COUNTER
depends on NF_TABLES
tristate "Netfilter nf_tables counter module"
help
This option adds the "counter" expression that you can use to
include packet and byte counters in a rule.
config NFT_LOG
depends on NF_TABLES
tristate "Netfilter nf_tables log module"
help
This option adds the "log" expression that you can use to log
packets matching some criteria.
config NFT_LIMIT
depends on NF_TABLES
tristate "Netfilter nf_tables limit module"
help
This option adds the "limit" expression that you can use to
ratelimit rule matchings.
config NFT_MASQ
depends on NF_TABLES
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables masquerade support"
@@ -513,7 +506,6 @@ config NFT_MASQ
to perform NAT in the masquerade flavour.
config NFT_REDIR
depends on NF_TABLES
depends on NF_CONNTRACK
depends on NF_NAT
tristate "Netfilter nf_tables redirect support"
@@ -522,7 +514,6 @@ config NFT_REDIR
to perform NAT in the redirect flavour.
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
select NF_NAT
tristate "Netfilter nf_tables nat module"
@@ -531,7 +522,6 @@ config NFT_NAT
typical Network Address Translation (NAT) packet transformations.
config NFT_QUEUE
depends on NF_TABLES
depends on NETFILTER_XTABLES
depends on NETFILTER_NETLINK_QUEUE
tristate "Netfilter nf_tables queue module"
@@ -540,7 +530,6 @@ config NFT_QUEUE
infrastructure (also known as NFQUEUE) from nftables.
config NFT_REJECT
depends on NF_TABLES
default m if NETFILTER_ADVANCED=n
tristate "Netfilter nf_tables reject support"
help
@@ -554,7 +543,6 @@ config NFT_REJECT_INET
tristate
config NFT_COMPAT
depends on NF_TABLES
depends on NETFILTER_XTABLES
tristate "Netfilter x_tables over nf_tables module"
help
@@ -562,6 +550,8 @@ config NFT_COMPAT
x_tables match/target extensions over the nf_tables
framework.
endif # NF_TABLES
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
@@ -401,7 +401,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_NAME] = { .type = NLA_STRING },
[NFTA_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
};
@@ -686,13 +687,13 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
if (!try_module_get(afi->owner))
return -EAFNOSUPPORT;
table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (table == NULL) {
module_put(afi->owner);
return -ENOMEM;
}
nla_strlcpy(table->name, name, nla_len(name));
nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
table->flags = flags;
@@ -21,6 +21,48 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_log.h>
enum nft_trace {
NFT_TRACE_RULE,
NFT_TRACE_RETURN,
NFT_TRACE_POLICY,
};
static const char *const comments[] = {
[NFT_TRACE_RULE] = "rule",
[NFT_TRACE_RETURN] = "return",
[NFT_TRACE_POLICY] = "policy",
};
static struct nf_loginfo trace_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 4,
.logflags = NF_LOG_MASK,
},
},
};
static void __nft_trace_packet(const struct nft_pktinfo *pkt,
const struct nft_chain *chain,
int rulenum, enum nft_trace type)
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
chain->table->name, chain->name, comments[type],
rulenum);
}
static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
const struct nft_chain *chain,
int rulenum, enum nft_trace type)
{
if (unlikely(pkt->skb->nf_trace))
__nft_trace_packet(pkt, chain, rulenum, type);
}
static void nft_cmp_fast_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1])
{
@@ -66,40 +108,6 @@ struct nft_jumpstack {
int rulenum;
};
enum nft_trace {
NFT_TRACE_RULE,
NFT_TRACE_RETURN,
NFT_TRACE_POLICY,
};
static const char *const comments[] = {
[NFT_TRACE_RULE] = "rule",
[NFT_TRACE_RETURN] = "return",
[NFT_TRACE_POLICY] = "policy",
};
static struct nf_loginfo trace_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 4,
.logflags = NF_LOG_MASK,
},
},
};
static void nft_trace_packet(const struct nft_pktinfo *pkt,
const struct nft_chain *chain,
int rulenum, enum nft_trace type)
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
chain->table->name, chain->name, comments[type],
rulenum);
}
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
@@ -146,8 +154,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
continue;
case NFT_CONTINUE:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
continue;
}
break;
@@ -157,37 +164,28 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
return data[NFT_REG_VERDICT].verdict;
}
switch (data[NFT_REG_VERDICT].verdict) {
case NFT_JUMP:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
jumpstack[stackptr].chain = chain;
jumpstack[stackptr].rule = rule;
jumpstack[stackptr].rulenum = rulenum;
stackptr++;
chain = data[NFT_REG_VERDICT].chain;
goto do_chain;
/* fall through */
case NFT_GOTO:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
chain = data[NFT_REG_VERDICT].chain;
goto do_chain;
case NFT_RETURN:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
break;
case NFT_CONTINUE:
if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
rulenum++;
/* fall through */
case NFT_RETURN:
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
break;
default:
WARN_ON(1);
@@ -201,8 +199,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
goto next_rule;
}
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
rcu_read_lock_bh();
stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
@@ -28,14 +28,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
case NFPROTO_IPV4:
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nf_send_unreach(pkt->skb, priv->icmp_code);
nf_send_unreach(pkt->skb, priv->icmp_code,
pkt->ops->hooknum);
break;
case NFT_REJECT_TCP_RST:
nf_send_reset(pkt->skb, pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nf_send_unreach(pkt->skb,
nft_reject_icmp_code(priv->icmp_code));
nft_reject_icmp_code(priv->icmp_code),
pkt->ops->hooknum);
break;
}
break;