Commit 12e7ada3 authored by Patrick McHardy, committed by Pablo Neira Ayuso

netfilter: nf_nat: use per-conntrack locking for sequence number adjustments

Get rid of the global lock and use per-conntrack locks for protecting the
sequence number adjustment data. Additionally, this saves one lock/unlock
operation for every TCP packet.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 2d89c68a
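For context (not part of the patch): the per-conntrack lock used here is the spinlock embedded in struct nf_conn itself, taken as ct->lock in the hunks below in place of the former global nf_nat_seqofs_lock. A heavily abbreviated sketch, with most fields elided and layout approximate:

struct nf_conn {
	struct nf_conntrack	ct_general;
	spinlock_t		lock;	/* per-conntrack lock; taken as ct->lock
					 * instead of the global nf_nat_seqofs_lock */
	/* ... remaining fields elided ... */
};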
@@ -30,8 +30,6 @@
 	pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
 		 x->offset_before, x->offset_after, x->correction_pos);
 
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
 /* Setup TCP sequence correction given this change at this sequence */
 static inline void
 adjust_tcp_sequence(u32 seq,
@@ -49,7 +47,7 @@ adjust_tcp_sequence(u32 seq,
 	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
 	DUMP_OFFSET(this_way);
 
-	spin_lock_bh(&nf_nat_seqofs_lock);
+	spin_lock_bh(&ct->lock);
 
 	/* SYN adjust. If it's uninitialized, or this is after last
 	 * correction, record it: we don't handle more than one
@@ -61,31 +59,26 @@ adjust_tcp_sequence(u32 seq,
 		this_way->offset_before = this_way->offset_after;
 		this_way->offset_after += sizediff;
 	}
-	spin_unlock_bh(&nf_nat_seqofs_lock);
+	spin_unlock_bh(&ct->lock);
 
 	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
 	DUMP_OFFSET(this_way);
 }
 
-/* Get the offset value, for conntrack */
+/* Get the offset value, for conntrack. Caller must have the conntrack locked */
 s32 nf_nat_get_offset(const struct nf_conn *ct,
 		      enum ip_conntrack_dir dir,
 		      u32 seq)
 {
 	struct nf_conn_nat *nat = nfct_nat(ct);
 	struct nf_nat_seq *this_way;
-	s32 offset;
 
 	if (!nat)
 		return 0;
 
 	this_way = &nat->seq[dir];
-	spin_lock_bh(&nf_nat_seqofs_lock);
-	offset = after(seq, this_way->correction_pos)
+	return after(seq, this_way->correction_pos)
 		 ? this_way->offset_after : this_way->offset_before;
-	spin_unlock_bh(&nf_nat_seqofs_lock);
-
-	return offset;
 }
 
 /* Frobs data inside this packet, which is linear. */
@@ -384,7 +377,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 		return 0;
 
 	tcph = (void *)skb->data + protoff;
-	spin_lock_bh(&nf_nat_seqofs_lock);
+	spin_lock_bh(&ct->lock);
 	if (after(ntohl(tcph->seq), this_way->correction_pos))
 		seqoff = this_way->offset_after;
 	else
@@ -410,7 +403,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 	tcph->ack_seq = newack;
 
 	res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
-	spin_unlock_bh(&nf_nat_seqofs_lock);
+	spin_unlock_bh(&ct->lock);
 
 	return res;
 }
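Since nf_nat_get_offset() no longer takes a lock of its own, the updated comment makes locking the caller's responsibility. A minimal, hypothetical illustration of what a caller would now have to do (read_seq_offset is not a real kernel function, just a sketch):

static s32 read_seq_offset(struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq)
{
	s32 off;

	spin_lock_bh(&ct->lock);	/* per-conntrack lock, as in the hunks above */
	off = nf_nat_get_offset(ct, dir, seq);
	spin_unlock_bh(&ct->lock);

	return off;
}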