Commit 62f64aed authored by Alexei Starovoitov, committed by David S. Miller

pktgen: introduce xmit_mode '<start_xmit|netif_receive>'

Introduce xmit_mode 'netif_receive' for pktgen which generates the
packets using familiar pktgen commands, but feeds them into
netif_receive_skb() instead of ndo_start_xmit().

Default mode is called 'start_xmit'.

It is designed to test netif_receive_skb and ingress qdisc
performance only. Make sure to understand how it works before
using it for other rx benchmarking.

Sample script 'pktgen.sh':
#!/bin/bash
function pgset() {
  local result

  echo $1 > $PGDEV

  result=`cat $PGDEV | fgrep "Result: OK:"`
  if [ "$result" = "" ]; then
    cat $PGDEV | fgrep Result:
  fi
}

[ -z "$1" ] && echo "Usage: $0 DEV" && exit 1
ETH=$1

PGDEV=/proc/net/pktgen/kpktgend_0
pgset "rem_device_all"
pgset "add_device $ETH"

PGDEV=/proc/net/pktgen/$ETH
pgset "xmit_mode netif_receive"
pgset "pkt_size 60"
pgset "dst 198.18.0.1"
pgset "dst_mac 90:e2:ba:ff:ff:ff"
pgset "count 10000000"
pgset "burst 32"

PGDEV=/proc/net/pktgen/pgctrl
echo "Running... ctrl^C to stop"
pgset "start"
echo "Done"
cat /proc/net/pktgen/$ETH

Usage:
$ sudo ./pktgen.sh eth2
...
Result: OK: 232376(c232372+d3) usec, 10000000 (60byte,0frags)
  43033682pps 20656Mb/sec (20656167360bps) errors: 10000000
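Note that 'errors: 10000000' is expected here: in netif_receive mode every
packet for which netif_receive_skb() returns NET_RX_DROP is counted as an
error, and with a foreign dst_mac all of them are dropped by ip_rcv (see
below). A rough sanity check after a run, assuming the same eth2 device as
above (a sketch, not part of the original output):

# sketch: 'errors' should equal the configured count when every
# injected packet is dropped by ip_rcv as intended
grep -E 'pkts-sofar|errors' /proc/net/pktgen/eth2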

Raw netif_receive_skb speed should be ~43 million packets
per second on a 3.7GHz x86, and 'perf report' should look like:
  37.69%  kpktgend_0   [kernel.vmlinux]  [k] __netif_receive_skb_core
  25.81%  kpktgend_0   [kernel.vmlinux]  [k] kfree_skb
   7.22%  kpktgend_0   [kernel.vmlinux]  [k] ip_rcv
   5.68%  kpktgend_0   [pktgen]          [k] pktgen_thread_worker

If fib_table_lookup is seen on top, it means the skb was processed
by the stack. To benchmark netif_receive_skb only, make sure that
the 'dst_mac' in your pktgen script differs from the receiving
device's MAC address, so the packets are dropped by ip_rcv.
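A quick way to check this up front, assuming the eth2 device and the dst_mac
from the sample script above (a sketch, not part of the original commit):

#!/bin/bash
# sketch: warn if dst_mac would match the device's own MAC, in which
# case packets would be delivered up the stack instead of dropped
DEV=eth2                     # example device
DST_MAC="90:e2:ba:ff:ff:ff"  # dst_mac from pktgen.sh above
DEV_MAC=$(cat /sys/class/net/$DEV/address)
if [ "$DEV_MAC" = "$DST_MAC" ]; then
  echo "dst_mac matches $DEV ($DEV_MAC): packets will hit the full stack"
else
  echo "dst_mac differs from $DEV ($DEV_MAC): ip_rcv will drop the packets"
fi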
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f1f00d8f
@@ -193,6 +193,10 @@ Examples:
 pgset "rate 300M"        set rate to 300 Mb/s
 pgset "ratep 1000000"    set rate to 1Mpps
 
+pgset "xmit_mode netif_receive"  RX inject into stack netif_receive_skb()
+                         Works with "burst" but not with "clone_skb".
+                         Default xmit_mode is "start_xmit".
+
 Sample scripts
 ==============
@@ -310,6 +314,9 @@ flowlen
 rate
 ratep
 
+xmit_mode <start_xmit|netif_receive>
+
 References:
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
......
@@ -210,6 +210,10 @@
 #define T_REMDEVALL   (1<<2)    /* Remove all devs */
 #define T_REMDEV      (1<<3)    /* Remove one dev */
 
+/* Xmit modes */
+#define M_START_XMIT          0 /* Default normal TX */
+#define M_NETIF_RECEIVE       1 /* Inject packets into stack */
+
 /* If lock -- protects updating of if_list */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 #define   if_unlock(t)         spin_unlock(&(t->if_lock));
@@ -251,13 +255,14 @@ struct pktgen_dev {
 	 * we will do a random selection from within the range.
 	 */
 	__u32 flags;
-	int removal_mark;	/* non-zero => the device is marked for
-				 * removal by worker thread */
-
+	int xmit_mode;
 	int min_pkt_size;
 	int max_pkt_size;
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
+	int removal_mark;	/* non-zero => the device is marked for
+				 * removal by worker thread */
+
 	struct page *page;
 	u64 delay;		/* nano-seconds */
@@ -620,6 +625,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	if (pkt_dev->node >= 0)
 		seq_printf(seq, "     node: %d\n", pkt_dev->node);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
+		seq_puts(seq, "     xmit_mode: netif_receive\n");
+
 	seq_puts(seq, "     Flags: ");
 
 	if (pkt_dev->flags & F_IPV6)
@@ -1081,7 +1089,8 @@ static ssize_t pktgen_if_write(struct file *file,
 		if (len < 0)
 			return len;
 		if ((value > 0) &&
-		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
+		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		i += len;
 		pkt_dev->clone_skb = value;
@@ -1134,7 +1143,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return len;
 
 		i += len;
-		if ((value > 1) &&
+		if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) &&
 		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		pkt_dev->burst = value < 1 ? 1 : value;
@@ -1160,6 +1169,35 @@ static ssize_t pktgen_if_write(struct file *file,
 			sprintf(pg_result, "ERROR: node not possible");
 		return count;
 	}
+
+	if (!strcmp(name, "xmit_mode")) {
+		char f[32];
+
+		memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+		if (len < 0)
+			return len;
+
+		if (copy_from_user(f, &user_buffer[i], len))
+			return -EFAULT;
+		i += len;
+
+		if (strcmp(f, "start_xmit") == 0) {
+			pkt_dev->xmit_mode = M_START_XMIT;
+		} else if (strcmp(f, "netif_receive") == 0) {
+			/* clone_skb set earlier, not supported in this mode */
+			if (pkt_dev->clone_skb > 0)
+				return -ENOTSUPP;
+			pkt_dev->xmit_mode = M_NETIF_RECEIVE;
+		} else {
+			sprintf(pg_result,
+				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
+				f, "start_xmit, netif_receive\n");
+			return count;
+		}
+		sprintf(pg_result, "OK: xmit_mode=%s", f);
+		return count;
+	}
+
 	if (!strcmp(name, "flag")) {
 		char f[32];
 		memset(f, 0, 32);
@@ -3320,6 +3358,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
 	struct net_device *odev = pkt_dev->odev;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
 	int ret;
 
 	/* If device is offline, then don't send */
@@ -3357,6 +3396,38 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->delay && pkt_dev->last_ok)
 		spin(pkt_dev, pkt_dev->next_tx);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
+		skb = pkt_dev->skb;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		atomic_add(burst, &skb->users);
+		local_bh_disable();
+		do {
+			ret = netif_receive_skb(skb);
+			if (ret == NET_RX_DROP)
+				pkt_dev->errors++;
+			pkt_dev->sofar++;
+			pkt_dev->seq_num++;
+			if (atomic_read(&skb->users) != burst) {
+				/* skb was queued by rps/rfs or taps,
+				 * so cannot reuse this skb
+				 */
+				atomic_sub(burst - 1, &skb->users);
+				/* get out of the loop and wait
+				 * until skb is consumed
+				 */
+				pkt_dev->last_ok = 1;
+				break;
+			}
+			/* skb was 'freed' by stack, so clean few
+			 * bits and reuse it
+			 */
+#ifdef CONFIG_NET_CLS_ACT
+			skb->tc_verd = 0; /* reset reclass/redir ttl */
+#endif
+		} while (--burst > 0);
+		goto out; /* Skips xmit_mode M_START_XMIT */
+	}
+
 	txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
 	local_bh_disable();
@@ -3404,6 +3475,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
+out:
 	local_bh_enable();
 
 	/* If pkt_dev->count is zero, then run forever */
......