Commit 7c9a2eea authored by David S. Miller

Merge branch 'flow_keys_digest'

Tom Herbert says:

====================
net: Eliminate calls to flow_dissector and introduce flow_keys_digest

In this patch set we add skb_get_hash_perturb, which gets the skbuff
hash for a packet and perturbs it using a provided key and jhash.
This function is used in several qdiscs and eliminates many calls to
flow_dissector and jhash_3words to get a perturbed hash for a packet.
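
For illustration, a minimal before/after sketch of a qdisc hash helper
(struct my_sched_data and both function names are hypothetical; the
pattern mirrors the sch_fq_codel and sch_sfq changes below, and assumes
<linux/skbuff.h>, <linux/jhash.h> and <net/flow_keys.h>):

/* Before: the qdisc dissects the packet and keys jhash_3words itself;
 * q->perturbation is the qdisc's own u32 perturbation value.
 */
static u32 my_hash_old(const struct my_sched_data *q,
		       const struct sk_buff *skb)
{
	struct flow_keys keys;

	skb_flow_dissect(skb, &keys);
	return jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);
}

/* After: one call dissects the flow and hashes it, using the qdisc's
 * perturbation value as the jhash initval.
 */
static u32 my_hash_new(const struct my_sched_data *q,
		       const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation);
}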

To handle sch_choke (which stores flow_keys in the skbuff cb) we add
flow_keys_digest, a compact digest of a flow constructed from a
flow_keys structure.
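
A minimal usage sketch of the digest API (flows_match is a hypothetical
helper, not the sch_choke code itself, which caches the digest in
skb->cb[] as shown in the hunks below):

static bool flows_match(struct sk_buff *skb1, struct sk_buff *skb2)
{
	struct flow_keys keys;
	struct flow_keys_digest d1, d2;

	skb_flow_dissect(skb1, &keys);
	make_flow_keys_digest(&d1, &keys);

	skb_flow_dissect(skb2, &keys);
	make_flow_keys_digest(&d2, &keys);

	/* The digest is a fixed 16 bytes, so two flows can be compared
	 * with memcmp, and the digest fits in the qdisc private area of
	 * skb->cb[].
	 */
	return !memcmp(&d1, &d2, sizeof(d1));
}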

This is the second version of the patches I posted a while ago. It is
prerequisite work for increasing the size of the flow_keys structure
and hashing over it (full IPv6 addresses, flow label, VLAN ID, etc.).

Version 2:

- Add a keyval parameter to __flow_hash_from_keys which allows the caller
  to set the initval for jhash
- skb_get_hash_perturb always does flow dissection and creates the hash
  from the input perturb value, which acts as the keyval to
  __flow_hash_from_keys (see the sketch below)
- Added a _flow_keys_digest_data structure which is used in
  make_flow_keys_digest. This fills out the digest by populating
  individual fields instead of copying the whole structure.
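
A consolidated sketch of the keyval plumbing described above
(hash_with_key is a hypothetical caller; the real definitions are in
the flow_dissector hunks further down):

static u32 hash_with_key(const struct sk_buff *skb, u32 keyval)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	/* keyval becomes the jhash initval inside __flow_hash_3words(),
	 * replacing the previously hard-coded global hashrnd.
	 */
	return __flow_hash_from_keys(&keys, keyval);
}

__skb_get_hash() passes the global hashrnd as keyval, while
skb_get_hash_perturb() passes the caller-supplied perturbation value.
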
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6a211654 2e99403d
@@ -927,6 +927,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
@@ -42,4 +42,20 @@ static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8
u32 flow_hash_from_keys(struct flow_keys *keys);
unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
__be16 protocol);
/* struct flow_keys_digest:
*
* This structure is used to hold a digest of the full flow keys. This is a
* larger "hash" of a flow to allow definitively matching specific flows where
* the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
* that it can be used in CB of skb (see sch_choke for an example).
*/
#define FLOW_KEYS_DIGEST_LEN 16
struct flow_keys_digest {
u8 data[FLOW_KEYS_DIGEST_LEN];
};
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
#endif
@@ -267,13 +267,12 @@ static __always_inline void __flow_hash_secret_init(void)
net_get_random_once(&hashrnd, sizeof(hashrnd));
}
static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c, u32 keyval)
{
__flow_hash_secret_init();
return jhash_3words(a, b, c, hashrnd);
return jhash_3words(a, b, c, keyval);
}
static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
u32 hash;
@@ -287,7 +286,8 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
hash = __flow_hash_3words((__force u32)keys->dst,
(__force u32)keys->src,
(__force u32)keys->ports);
(__force u32)keys->ports,
keyval);
if (!hash)
hash = 1;
@@ -296,10 +296,47 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
u32 flow_hash_from_keys(struct flow_keys *keys)
{
return __flow_hash_from_keys(keys);
__flow_hash_secret_init();
return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
struct flow_keys *keys, u32 keyval)
{
if (!skb_flow_dissect(skb, keys))
return 0;
return __flow_hash_from_keys(keys, keyval);
}
struct _flow_keys_digest_data {
__be16 n_proto;
u8 ip_proto;
u8 padding;
__be32 ports;
__be32 src;
__be32 dst;
};
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow)
{
struct _flow_keys_digest_data *data =
(struct _flow_keys_digest_data *)digest;
BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
memset(digest, 0, sizeof(*digest));
data->n_proto = flow->n_proto;
data->ip_proto = flow->ip_proto;
data->ports = flow->ports;
data->src = flow->src;
data->dst = flow->dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);
/*
* __skb_get_hash: calculate a flow hash based on src/dst addresses
* and src/dst port numbers. Sets hash in skb to non-zero hash value
@@ -309,8 +346,12 @@ EXPORT_SYMBOL(flow_hash_from_keys);
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
if (!skb_flow_dissect(skb, &keys))
__flow_hash_secret_init();
hash = ___skb_get_hash(skb, &keys, hashrnd);
if (!hash)
return;
if (keys.ports)
@@ -318,10 +359,18 @@ void __skb_get_hash(struct sk_buff *skb)
skb->sw_hash = 1;
skb->hash = __flow_hash_from_keys(&keys);
skb->hash = hash;
}
EXPORT_SYMBOL(__skb_get_hash);
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
struct flow_keys keys;
return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
/*
* Returns a Tx hash based on the given packet descriptor and Tx queues' number
* to be used as a distribution range.
@@ -133,16 +133,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
--sch->q.qlen;
}
/* private part of skb->cb[] that a qdisc is allowed to use
* is limited to QDISC_CB_PRIV_LEN bytes.
* As a flow key might be too large, we store a part of it only.
*/
#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
struct choke_skb_cb {
u16 classid;
u8 keys_valid;
u8 keys[QDISC_CB_PRIV_LEN - 3];
struct flow_keys_digest keys;
};
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -177,18 +171,18 @@ static bool choke_match_flow(struct sk_buff *skb1,
if (!choke_skb_cb(skb1)->keys_valid) {
choke_skb_cb(skb1)->keys_valid = 1;
skb_flow_dissect(skb1, &temp);
memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
}
if (!choke_skb_cb(skb2)->keys_valid) {
choke_skb_cb(skb2)->keys_valid = 1;
skb_flow_dissect(skb2, &temp);
memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
}
return !memcmp(&choke_skb_cb(skb1)->keys,
&choke_skb_cb(skb2)->keys,
CHOKE_K_LEN);
sizeof(choke_skb_cb(skb1)->keys));
}
/*
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>
/* Fair Queue CoDel.
@@ -68,15 +67,9 @@ struct fq_codel_sched_data {
};
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
const struct sk_buff *skb)
struct sk_buff *skb)
{
struct flow_keys keys;
unsigned int hash;
skb_flow_dissect(skb, &keys);
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src ^ keys.ip_proto,
(__force u32)keys.ports, q->perturbation);
u32 hash = skb_get_hash_perturb(skb, q->perturbation);
return reciprocal_scale(hash, q->flows_cnt);
}
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/flow_keys.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
@@ -176,22 +175,6 @@ static u32 hhf_time_stamp(void)
return jiffies;
}
static unsigned int skb_hash(const struct hhf_sched_data *q,
const struct sk_buff *skb)
{
struct flow_keys keys;
unsigned int hash;
if (skb->sk && skb->sk->sk_hash)
return skb->sk->sk_hash;
skb_flow_dissect(skb, &keys);
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src ^ keys.ip_proto,
(__force u32)keys.ports, q->perturbation);
return hash;
}
/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
struct list_head *head,
@@ -280,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
}
/* Get hashed flow-id of the skb. */
hash = skb_hash(q, skb);
hash = skb_get_hash_perturb(skb, q->perturbation);
/* Check if this packet belongs to an already established HH flow. */
flow_pos = hash & HHF_BIT_MASK;
@@ -26,7 +26,6 @@
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/flow_keys.h>
/*
* SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -285,9 +284,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int i;
u32 p_min = ~0;
u32 minqlen = ~0;
u32 r, slot, salt, sfbhash;
u32 r, sfbhash;
u32 slot = q->slot;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
struct flow_keys keys;
if (unlikely(sch->q.qlen >= q->limit)) {
qdisc_qstats_overlimit(sch);
@@ -309,22 +308,17 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
fl = rcu_dereference_bh(q->filter_list);
if (fl) {
u32 salt;
/* If using external classifiers, get result and record it. */
if (!sfb_classify(skb, fl, &ret, &salt))
goto other_drop;
keys.src = salt;
keys.dst = 0;
keys.ports = 0;
sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
} else {
skb_flow_dissect(skb, &keys);
sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
}
slot = q->slot;
sfbhash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src,
(__force u32)keys.ports,
q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -356,9 +350,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(p_min >= SFB_MAX_PROB)) {
/* Inelastic flow */
if (q->double_buffering) {
sfbhash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src,
(__force u32)keys.ports,
sfbhash = skb_get_hash_perturb(skb,
q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/red.h>
@@ -156,30 +155,10 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_MAX_FLOWS];
}
/*
* In order to be able to quickly rehash our queue when timer changes
* q->perturbation, we store flow_keys in skb->cb[]
*/
struct sfq_skb_cb {
struct flow_keys keys;
};
static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}
static unsigned int sfq_hash(const struct sfq_sched_data *q,
const struct sk_buff *skb)
{
const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
unsigned int hash;
hash = jhash_3words((__force u32)keys->dst,
(__force u32)keys->src ^ keys->ip_proto,
(__force u32)keys->ports, q->perturbation);
return hash & (q->divisor - 1);
return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -196,10 +175,8 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return TC_H_MIN(skb->priority);
fl = rcu_dereference_bh(q->filter_list);
if (!fl) {
skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
if (!fl)
return sfq_hash(q, skb) + 1;
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tc_classify(skb, fl, &res);