Commit 4218b0e2 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Florian Westphal says:

====================
Netfilter updates for net-next

The following patch set includes netfilter updates for your *net-next* tree.

1. Replace pr_debug use with nf_log infra for debugging in sctp
   conntrack.
2. Remove pr_debug calls; they are either useless or we have better
   options in place.
3. Avoid repeated loads of ct->status in some spots.
   Some bit-flags cannot change during the lifetime of
   a connection, so there is no need to re-fetch them (see the
   single-load sketch below).
4. Avoid unneeded nesting of rcu_read_lock during tuple lookup
   (a userspace analogue is sketched after that hunk below).
5. Remove the CLUSTERIP target. It has been marked as obsolete for
   years, and we still see WARN splats caused by races in the
   out-of-band /proc interface installed by this target.
6. Add a static key to nf_tables so the retpoline-mitigation
   if/else cascade is skipped when the CPU does not need the
   retpoline thunk (see the dispatch sketch further below).
7. Add nf_tables objref calls to the retpoline mitigation workaround.
8. Split the parts of nft_ct.c that do not need symbols exported by
   the conntrack modules and place them in nf_tables directly.
   This avoids an indirect call for the 'ct status' checks.
9. Add 'destroy' commands to nf_tables. They are identical
   to the existing 'delete' commands, but do not report an
   error if the referenced object (set, chain, rule...) does
   not exist (see the uapi example below). From Fernando.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 387f5f7d f80a612d
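As a minimal userspace C sketch of the single-load pattern from item 3 (not the kernel code itself: READ_ONCE() is redefined locally as a stand-in for the kernel macro, and only the two bit values are taken from the kernel's enum ip_conntrack_status; everything else is invented for the example):

#include <stdio.h>

/* Stand-in for the kernel's READ_ONCE(): the volatile access forces
 * exactly one load, so the compiler cannot re-fetch the shared field
 * for each bit test below.
 */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

/* Bit values as in the kernel's enum ip_conntrack_status. */
enum {
	IPS_EXPECTED	= 1 << 0,
	IPS_SEEN_REPLY	= 1 << 1,
};

static const char *classify(const unsigned long *ct_status)
{
	unsigned long status = READ_ONCE(*ct_status);	/* single load */

	if (status & IPS_SEEN_REPLY)	/* never cleared once set */
		return "ESTABLISHED";
	if (status & IPS_EXPECTED)
		return "RELATED";
	return "NEW";
}

int main(void)
{
	unsigned long status = IPS_SEEN_REPLY;

	printf("%s\n", classify(&status));
	return 0;
}

Built with gcc (typeof is a GNU extension), this prints ESTABLISHED; the point is that both bit tests run against the local copy, mirroring the resolve_normal_ct() hunk further down.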
@@ -61,6 +61,16 @@ struct nft_immediate_expr {
extern const struct nft_expr_ops nft_cmp_fast_ops;
extern const struct nft_expr_ops nft_cmp16_fast_ops;
struct nft_ct {
enum nft_ct_keys key:8;
enum ip_conntrack_dir dir:8;
u8 len;
union {
u8 dreg;
u8 sreg;
};
};
struct nft_payload {
enum nft_payload_bases base:8;
u8 offset;
@@ -140,6 +150,8 @@ void nft_rt_get_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_ct_get_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt);
enum {
NFT_PAYLOAD_CTX_INNER_TUN = (1 << 0),
@@ -164,4 +176,8 @@ void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt,
struct nft_inner_tun_ctx *ctx);
void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
@@ -98,6 +98,13 @@ enum nft_verdicts {
* @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
* @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
* @NFT_MSG_GETRULE_RESET: get rules and reset stateful expressions (enum nft_obj_attributes)
* @NFT_MSG_DESTROYTABLE: destroy a table (enum nft_table_attributes)
* @NFT_MSG_DESTROYCHAIN: destroy a chain (enum nft_chain_attributes)
* @NFT_MSG_DESTROYRULE: destroy a rule (enum nft_rule_attributes)
* @NFT_MSG_DESTROYSET: destroy a set (enum nft_set_attributes)
* @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)
* @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)
* @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -126,6 +133,13 @@ enum nf_tables_msg_types {
NFT_MSG_GETFLOWTABLE,
NFT_MSG_DELFLOWTABLE,
NFT_MSG_GETRULE_RESET,
NFT_MSG_DESTROYTABLE,
NFT_MSG_DESTROYCHAIN,
NFT_MSG_DESTROYRULE,
NFT_MSG_DESTROYSET,
NFT_MSG_DESTROYSETELEM,
NFT_MSG_DESTROYOBJ,
NFT_MSG_DESTROYFLOWTABLE,
NFT_MSG_MAX,
};
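For illustration of item 9 only (not part of the patch): userspace reaches these new message types through nfnetlink, which carries the subsystem id in the upper byte of nlmsg_type and the per-subsystem message in the lower byte. A minimal sketch, assuming uapi headers new enough to include this series:

#include <stdio.h>
#include <linux/netfilter/nfnetlink.h>	/* NFNL_SUBSYS_NFTABLES */
#include <linux/netfilter/nf_tables.h>	/* NFT_MSG_DESTROYTABLE */

int main(void)
{
	/* The DESTROY values slot into the lower byte exactly like the
	 * existing DEL ones; only the error semantics differ (a missing
	 * object is not reported as an error).
	 */
	unsigned int type = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_DESTROYTABLE;

	printf("nlmsg_type for destroy-table: 0x%x\n", type);
	return 0;
}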
@@ -259,20 +259,6 @@ config IP_NF_MANGLE
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_CLUSTERIP
tristate "CLUSTERIP target support"
depends on IP_NF_MANGLE
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_CONNTRACK_MARK
select NETFILTER_FAMILY_ARP
help
The CLUSTERIP target allows you to build load-balancing clusters of
network servers without having a dedicated load-balancing
router/server/switch.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_ECN
tristate "ECN target support"
depends on IP_NF_MANGLE
@@ -39,7 +39,6 @@ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
@@ -98,6 +98,13 @@ nf_tables-objs += nft_set_pipapo_avx2.o
endif
endif
ifdef CONFIG_NFT_CT
ifdef CONFIG_RETPOLINE
nf_tables-objs += nft_ct_fast.o
endif
endif
obj-$(CONFIG_NF_TABLES) += nf_tables.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o
@@ -514,7 +514,6 @@ EXPORT_SYMBOL_GPL(nf_ct_get_id);
static void
clean_from_lists(struct nf_conn *ct)
{
pr_debug("clean_from_lists(%p)\n", ct);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
@@ -582,7 +581,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
pr_debug("%s(%p)\n", __func__, ct);
WARN_ON(refcount_read(&nfct->use) != 0);
if (unlikely(nf_ct_is_template(ct))) {
@@ -603,7 +601,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
if (ct->master)
nf_ct_put(ct->master);
pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
nf_conntrack_free(ct);
}
EXPORT_SYMBOL(nf_ct_destroy);
@@ -786,8 +783,6 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
rcu_read_lock();
h = ____nf_conntrack_find(net, zone, tuple, hash);
if (h) {
/* We have a candidate that matches the tuple we're interested
@@ -799,7 +794,7 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
smp_acquire__after_ctrl_dep();
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
goto found;
return h;
/* TYPESAFE_BY_RCU recycled the candidate */
nf_ct_put(ct);
@@ -807,8 +802,6 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
h = NULL;
}
found:
rcu_read_unlock();
return h;
}
@@ -820,16 +813,21 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
struct nf_conntrack_tuple_hash *thash;
rcu_read_lock();
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, zone_id, net));
if (thash)
return thash;
goto out_unlock;
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
if (rid != zone_id)
return __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, rid, net));
thash = __nf_conntrack_find_get(net, zone, tuple,
hash_conntrack_raw(tuple, rid, net));
out_unlock:
rcu_read_unlock();
return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
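The hunks above hoist the rcu_read_lock()/rcu_read_unlock() pair out of __nf_conntrack_find_get() and into nf_conntrack_find_get(), so the second lookup for the reply-direction zone no longer opens a nested read-side section. A rough userspace analogue using liburcu (link with -lurcu); lookup_locked() and its integer zone ids are invented for the sketch:

#include <urcu.h>	/* userspace RCU: rcu_read_lock() and friends */
#include <stdio.h>

/* Like the reworked __nf_conntrack_find_get(): runs under the
 * caller's read-side section instead of taking rcu_read_lock()
 * itself.
 */
static int lookup_locked(int zone_id)
{
	return zone_id == 2;	/* placeholder for the real hash lookup */
}

int main(void)
{
	int found;

	rcu_register_thread();

	rcu_read_lock();		/* one section covers both attempts */
	found = lookup_locked(1);	/* original-direction zone */
	if (!found)
		found = lookup_locked(2);	/* reply-direction zone */
	rcu_read_unlock();

	printf("found: %d\n", found);
	rcu_unregister_thread();
	return 0;
}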
@@ -1210,7 +1208,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
pr_debug("Confirming conntrack %p\n", ct);
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
* user context, else we insert an already 'dead' hash, blocking
@@ -1721,10 +1718,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_zone tmp;
struct nf_conntrack_net *cnet;
if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
pr_debug("Can't invert tuple.\n");
if (!nf_ct_invert_tuple(&repl_tuple, tuple))
return NULL;
}
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
@@ -1764,8 +1759,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
pr_debug("expectation arrives ct=%p exp=%p\n",
ct, exp);
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
@@ -1829,10 +1822,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
&tuple)) {
pr_debug("Can't get tuple\n");
&tuple))
return 0;
}
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
@@ -1864,17 +1855,15 @@ resolve_normal_ct(struct nf_conn *tmpl,
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
ctinfo = IP_CT_ESTABLISHED_REPLY;
} else {
unsigned long status = READ_ONCE(ct->status);
/* Once we've had two way comms, always ESTABLISHED. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
pr_debug("normal packet for %p\n", ct);
if (likely(status & IPS_SEEN_REPLY))
ctinfo = IP_CT_ESTABLISHED;
} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
pr_debug("related packet for %p\n", ct);
else if (status & IPS_EXPECTED)
ctinfo = IP_CT_RELATED;
} else {
pr_debug("new packet for %p\n", ct);
else
ctinfo = IP_CT_NEW;
}
}
nf_ct_set(skb, ct, ctinfo);
return 0;
@@ -1988,7 +1977,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
/* rcu_read_lock()ed by nf_hook_thresh */
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
pr_debug("not prepared to track yet or error occurred\n");
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
@@ -2027,7 +2015,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
pr_debug("nf_conntrack_in: Can't track with proto module\n");
nf_ct_put(ct);
skb->_nfct = 0;
/* Special case: TCP tracker reports an attempt to reopen a
@@ -2066,7 +2053,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
/* Should be unconfirmed, so not in hash table yet */
WARN_ON(nf_ct_is_confirmed(ct));
pr_debug("Altering reply tuple of %p to ", ct);
nf_ct_dump_tuple(newreply);
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
@@ -284,16 +284,11 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
/* We only do TCP and SCTP at the moment: is there a better way? */
if (tuple.dst.protonum != IPPROTO_TCP &&
tuple.dst.protonum != IPPROTO_SCTP) {
pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
tuple.dst.protonum != IPPROTO_SCTP)
return -ENOPROTOOPT;
}
if ((unsigned int)*len < sizeof(struct sockaddr_in)) {
pr_debug("SO_ORIGINAL_DST: len %d not %zu\n",
*len, sizeof(struct sockaddr_in));
if ((unsigned int)*len < sizeof(struct sockaddr_in))
return -EINVAL;
}
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (h) {
@@ -307,17 +302,12 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
.tuple.dst.u3.ip;
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
&sin.sin_addr.s_addr, ntohs(sin.sin_port));
nf_ct_put(ct);
if (copy_to_user(user, &sin, sizeof(sin)) != 0)
return -EFAULT;
else
return 0;
}
pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
&tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
&tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
return -ENOENT;
}
@@ -360,12 +350,8 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
return -EINVAL;
h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (!h) {
pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
&tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
&tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
if (!h)
return -ENOENT;
}
ct = nf_ct_tuplehash_to_ctrack(h);
@@ -168,7 +168,8 @@ for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0; \
static int do_basic_checks(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
unsigned long *map)
unsigned long *map,
const struct nf_hook_state *state)
{
u_int32_t offset, count;
struct sctp_chunkhdr _sch, *sch;
@@ -177,8 +178,6 @@ static int do_basic_checks(struct nf_conn *ct,
flag = 0;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
pr_debug("Chunk Num: %d Type: %d\n", count, sch->type);
if (sch->type == SCTP_CID_INIT ||
sch->type == SCTP_CID_INIT_ACK ||
sch->type == SCTP_CID_SHUTDOWN_COMPLETE)
@@ -193,7 +192,9 @@ static int do_basic_checks(struct nf_conn *ct,
sch->type == SCTP_CID_COOKIE_ECHO ||
flag) &&
count != 0) || !sch->length) {
pr_debug("Basic checks failed\n");
nf_ct_l4proto_log_invalid(skb, ct, state,
"%s failed. chunk num %d, type %d, len %d flag %d\n",
__func__, count, sch->type, sch->length, flag);
return 1;
}
@@ -201,7 +202,6 @@ static int do_basic_checks(struct nf_conn *ct,
set_bit(sch->type, map);
}
pr_debug("Basic checks passed\n");
return count == 0;
}
@@ -211,69 +211,51 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
{
int i;
pr_debug("Chunk type: %d\n", chunk_type);
switch (chunk_type) {
case SCTP_CID_INIT:
pr_debug("SCTP_CID_INIT\n");
i = 0;
break;
case SCTP_CID_INIT_ACK:
pr_debug("SCTP_CID_INIT_ACK\n");
i = 1;
break;
case SCTP_CID_ABORT:
pr_debug("SCTP_CID_ABORT\n");
i = 2;
break;
case SCTP_CID_SHUTDOWN:
pr_debug("SCTP_CID_SHUTDOWN\n");
i = 3;
break;
case SCTP_CID_SHUTDOWN_ACK:
pr_debug("SCTP_CID_SHUTDOWN_ACK\n");
i = 4;
break;
case SCTP_CID_ERROR:
pr_debug("SCTP_CID_ERROR\n");
i = 5;
break;
case SCTP_CID_COOKIE_ECHO:
pr_debug("SCTP_CID_COOKIE_ECHO\n");
i = 6;
break;
case SCTP_CID_COOKIE_ACK:
pr_debug("SCTP_CID_COOKIE_ACK\n");
i = 7;
break;
case SCTP_CID_SHUTDOWN_COMPLETE:
pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
i = 8;
break;
case SCTP_CID_HEARTBEAT:
pr_debug("SCTP_CID_HEARTBEAT");
i = 9;
break;
case SCTP_CID_HEARTBEAT_ACK:
pr_debug("SCTP_CID_HEARTBEAT_ACK");
i = 10;
break;
case SCTP_CID_DATA:
case SCTP_CID_SACK:
pr_debug("SCTP_CID_DATA/SACK");
i = 11;
break;
default:
/* Other chunks like DATA or SACK do not change the state */
pr_debug("Unknown chunk type, Will stay in %s\n",
sctp_conntrack_names[cur_state]);
pr_debug("Unknown chunk type %d, Will stay in %s\n",
chunk_type, sctp_conntrack_names[cur_state]);
return cur_state;
}
pr_debug("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
dir, sctp_conntrack_names[cur_state], chunk_type,
sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
return sctp_conntracks[dir][i][cur_state];
}
@@ -392,7 +374,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
if (sh == NULL)
goto out;
if (do_basic_checks(ct, skb, dataoff, map) != 0)
if (do_basic_checks(ct, skb, dataoff, map, state) != 0)
goto out;
if (!nf_ct_is_confirmed(ct)) {
@@ -414,7 +396,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
!test_bit(SCTP_CID_HEARTBEAT, map) &&
!test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
sh->vtag != ct->proto.sctp.vtag[dir]) {
pr_debug("Verification tag check failed\n");
nf_ct_l4proto_log_invalid(skb, ct, state,
"verification tag check failed %x vs %x for dir %d",
sh->vtag, ct->proto.sctp.vtag[dir], dir);
goto out;
}
}
@@ -488,9 +472,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* Invalid */
if (new_state == SCTP_CONNTRACK_MAX) {
pr_debug("nf_conntrack_sctp: Invalid dir=%i ctype=%u "
"conntrack=%u\n",
dir, sch->type, old_state);
nf_ct_l4proto_log_invalid(skb, ct, state,
"Invalid, old_state %d, dir %d, type %d",
old_state, dir, sch->type);
goto out_unlock;
}
@@ -536,7 +521,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
dir == IP_CT_DIR_REPLY &&
new_state == SCTP_CONNTRACK_ESTABLISHED) {
pr_debug("Setting assured bit\n");
set_bit(IPS_ASSURED_BIT, &ct->status);
nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
@@ -930,7 +930,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
@@ -954,7 +953,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
dir = CTINFO2DIR(ctinfo);
index = get_conntrack_index(th);
new_state = tcp_conntracks[dir][index][old_state];
tuple = &ct->tuplehash[dir].tuple;
switch (new_state) {
case TCP_CONNTRACK_SYN_SENT:
@@ -1217,13 +1215,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
pr_debug("tcp_conntracks: ");
nf_ct_dump_tuple(tuple);
pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
(th->syn ? 1 : 0), (th->ack ? 1 : 0),
(th->fin ? 1 : 0), (th->rst ? 1 : 0),
old_state, new_state);
ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_FIN_WAIT)
@@ -88,6 +88,7 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
const struct nf_hook_state *state)
{
unsigned int *timeouts;
unsigned long status;
if (udp_error(skb, dataoff, state))
return -NF_ACCEPT;
@@ -96,26 +97,27 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
if (!nf_ct_is_confirmed(ct))
status = READ_ONCE(ct->status);
if ((status & IPS_CONFIRMED) == 0)
ct->proto.udp.stream_ts = 2 * HZ + jiffies;
/* If we've seen traffic both ways, this is some kind of UDP
* stream. Set Assured.
*/
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
if (status & IPS_SEEN_REPLY) {
unsigned long extra = timeouts[UDP_CT_UNREPLIED];
bool stream = false;
/* Still active after two seconds? Extend timeout. */
if (time_after(jiffies, ct->proto.udp.stream_ts)) {
extra = timeouts[UDP_CT_REPLIED];
stream = true;
stream = (status & IPS_ASSURED) == 0;
}
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
if (unlikely((ct->status & IPS_NAT_CLASH)))
if (unlikely((status & IPS_NAT_CLASH)))
return NF_ACCEPT;
/* Also, more likely to be important, and not a probe */
@@ -21,6 +21,26 @@
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nft_meta.h>
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86)
static struct static_key_false nf_tables_skip_direct_calls;
static bool nf_skip_indirect_calls(void)
{
return static_branch_likely(&nf_tables_skip_direct_calls);
}
static void __init nf_skip_indirect_calls_enable(void)
{
if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
static_branch_enable(&nf_tables_skip_direct_calls);
}
#else
static inline bool nf_skip_indirect_calls(void) { return false; }
static inline void nf_skip_indirect_calls_enable(void) { }
#endif
static noinline void __nft_trace_packet(struct nft_traceinfo *info,
const struct nft_chain *chain,
enum nft_trace_types type)
@@ -193,7 +213,12 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
struct nft_pktinfo *pkt)
{
#ifdef CONFIG_RETPOLINE
unsigned long e = (unsigned long)expr->ops->eval;
unsigned long e;
if (nf_skip_indirect_calls())
goto indirect_call;
e = (unsigned long)expr->ops->eval;
#define X(e, fun) \
do { if ((e) == (unsigned long)(fun)) \
return fun(expr, regs, pkt); } while (0)
@@ -203,13 +228,19 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
X(e, nft_counter_eval);
X(e, nft_meta_get_eval);
X(e, nft_lookup_eval);
#if IS_ENABLED(CONFIG_NFT_CT)
X(e, nft_ct_get_fast_eval);
#endif
X(e, nft_range_eval);
X(e, nft_immediate_eval);
X(e, nft_byteorder_eval);
X(e, nft_dynset_eval);
X(e, nft_rt_get_eval);
X(e, nft_bitwise_eval);
X(e, nft_objref_eval);
X(e, nft_objref_map_eval);
#undef X
indirect_call:
#endif /* CONFIG_RETPOLINE */
expr->ops->eval(expr, regs, pkt);
}
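A self-contained userspace sketch of the comparison trick in expr_call_ops_eval() above (handler names invented for the example): the stored function pointer is compared against the known evaluators and, on a match, the symbol is called directly, letting the compiler emit a plain call instead of a retpoline-thunked indirect branch. The new static key simply skips the whole cascade on CPUs that do not need the thunk.

#include <stdio.h>

static void handler_a(int v) { printf("a: %d\n", v); }
static void handler_b(int v) { printf("b: %d\n", v); }

static void dispatch(void (*eval)(int), int v)
{
	unsigned long e = (unsigned long)eval;

/* Same shape as the kernel's X() macro: pointer compare, then a
 * direct call.
 */
#define X(e, fun) \
	do { if ((e) == (unsigned long)(fun)) { fun(v); return; } } while (0)
	X(e, handler_a);
	X(e, handler_b);
#undef X

	eval(v);	/* no match: fall back to the indirect call */
}

int main(void)
{
	dispatch(handler_a, 1);
	dispatch(handler_b, 2);
	return 0;
}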
@@ -369,6 +400,8 @@ int __init nf_tables_core_module_init(void)
goto err;
}
nf_skip_indirect_calls_enable();
return 0;
err:
@@ -12,7 +12,7 @@
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_tuple.h>
@@ -23,16 +23,6 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
struct nft_ct {
enum nft_ct_keys key:8;
enum ip_conntrack_dir dir:8;
u8 len;
union {
u8 dreg;
u8 sreg;
};
};
struct nft_ct_helper_obj {
struct nf_conntrack_helper *helper4;
struct nf_conntrack_helper *helper6;
@@ -759,6 +749,18 @@ static bool nft_ct_set_reduce(struct nft_regs_track *track,
return false;
}
#ifdef CONFIG_RETPOLINE
static const struct nft_expr_ops nft_ct_get_fast_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
.eval = nft_ct_get_fast_eval,
.init = nft_ct_get_init,
.destroy = nft_ct_get_destroy,
.dump = nft_ct_get_dump,
.reduce = nft_ct_set_reduce,
};
#endif
static const struct nft_expr_ops nft_ct_set_ops = {
.type = &nft_ct_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
@@ -791,8 +793,21 @@ nft_ct_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG])
return ERR_PTR(-EINVAL);
if (tb[NFTA_CT_DREG])
if (tb[NFTA_CT_DREG]) {
#ifdef CONFIG_RETPOLINE
u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
switch (k) {
case NFT_CT_STATE:
case NFT_CT_DIRECTION:
case NFT_CT_STATUS:
case NFT_CT_MARK:
case NFT_CT_SECMARK:
return &nft_ct_get_fast_ops;
}
#endif
return &nft_ct_get_ops;
}
if (tb[NFTA_CT_SREG]) {
#ifdef CONFIG_NF_CONNTRACK_ZONES
// SPDX-License-Identifier: GPL-2.0-only
#if IS_ENABLED(CONFIG_NFT_CT)
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack.h>
void nft_ct_get_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
if (!ct) {
regs->verdict.code = NFT_BREAK;
return;
}
switch (priv->key) {
case NFT_CT_STATE:
if (ct)
state = NF_CT_STATE_BIT(ctinfo);
else if (ctinfo == IP_CT_UNTRACKED)
state = NF_CT_STATE_UNTRACKED_BIT;
else
state = NF_CT_STATE_INVALID_BIT;
*dest = state;
return;
case NFT_CT_DIRECTION:
nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
case NFT_CT_STATUS:
*dest = ct->status;
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
*dest = ct->mark;
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
case NFT_CT_SECMARK:
*dest = ct->secmark;
return;
#endif
default:
WARN_ON_ONCE(1);
regs->verdict.code = NFT_BREAK;
break;
}
}
EXPORT_SYMBOL_GPL(nft_ct_get_fast_eval);
#endif
@@ -13,9 +13,9 @@
#define nft_objref_priv(expr) *((struct nft_object **)nft_expr_priv(expr))
static void nft_objref_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
void nft_objref_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_object *obj = nft_objref_priv(expr);
@@ -100,9 +100,9 @@ struct nft_objref_map {
struct nft_set_binding binding;
};
static void nft_objref_map_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
void nft_objref_map_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;