Commit 5bcb5c7e authored by Dirk van der Merwe, committed by David S. Miller

nfp: tls: set skb decrypted flag

Firmware indicates when a packet has been decrypted by reusing the
currently unused BPF flag.  Transfer this information into the skb
and provide a statistic of all decrypted segments.
Signed-off-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f953d33b
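
For quick reference, the core of the RX-path change, condensed from the nfp_net_rx() hunk below (comments added here for context; PCIE_DESC_RX_DECRYPTED reuses the descriptor bit previously named PCIE_DESC_RX_BPF, and hw_tls_rx is the new per-vector counter):

#ifdef CONFIG_TLS_DEVICE
	if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
		/* firmware already decrypted this TLS segment */
		skb->decrypted = true;
		/* the u64_stats seqcount keeps the 64-bit counter consistent on 32-bit arches */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_tls_rx++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
#endif

The counter is aggregated per ring vector and exported through ethtool -S under the "rx_tls_decrypted" string added in the nfp_vnic_get_sw_stats_strings() hunk below.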
@@ -240,7 +240,7 @@ struct nfp_net_tx_ring {
 #define PCIE_DESC_RX_I_TCP_CSUM_OK	cpu_to_le16(BIT(11))
 #define PCIE_DESC_RX_I_UDP_CSUM	cpu_to_le16(BIT(10))
 #define PCIE_DESC_RX_I_UDP_CSUM_OK	cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_BPF		cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_DECRYPTED		cpu_to_le16(BIT(8))
 #define PCIE_DESC_RX_EOP		cpu_to_le16(BIT(7))
 #define PCIE_DESC_RX_IP4_CSUM		cpu_to_le16(BIT(6))
 #define PCIE_DESC_RX_IP4_CSUM_OK	cpu_to_le16(BIT(5))
@@ -367,6 +367,7 @@ struct nfp_net_rx_ring {
  * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
  * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
  * @hw_csum_rx_error:	Counter of packets with bad checksums
+ * @hw_tls_rx:	Number of packets with TLS decrypted by hardware
  * @tx_sync:	Seqlock for atomic updates of TX stats
  * @tx_pkts:	Number of Transmitted packets
  * @tx_bytes:	Number of Transmitted bytes
@@ -415,6 +416,7 @@ struct nfp_net_r_vector {
 	u64 hw_csum_rx_ok;
 	u64 hw_csum_rx_inner_ok;
 	u64 hw_csum_rx_complete;
+	u64 hw_tls_rx;

 	u64 hw_csum_rx_error;
 	u64 rx_replace_buf_alloc_fail;
...
@@ -1951,6 +1951,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)

 		nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);

+#ifdef CONFIG_TLS_DEVICE
+		if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+			skb->decrypted = true;
+			u64_stats_update_begin(&r_vec->rx_sync);
+			r_vec->hw_tls_rx++;
+			u64_stats_update_end(&r_vec->rx_sync);
+		}
+#endif
+
 		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       le16_to_cpu(rxd->rxd.vlan));
...
@@ -150,7 +150,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {

 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
 #define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS 12
+#define NN_RVEC_GATHER_STATS 13
 #define NN_RVEC_PER_Q_STATS 3
 #define NN_CTRL_PATH_STATS 1

@@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 		data = nfp_pr_et(data, "hw_rx_csum_complete");
 		data = nfp_pr_et(data, "hw_rx_csum_err");
 		data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
+		data = nfp_pr_et(data, "rx_tls_decrypted");
 		data = nfp_pr_et(data, "hw_tx_csum");
 		data = nfp_pr_et(data, "hw_tx_inner_csum");
 		data = nfp_pr_et(data, "tx_gather");
@@ -475,19 +476,20 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
 			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
 			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+			tmp[5] = nn->r_vecs[i].hw_tls_rx;
 		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

 		do {
 			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
 			data[1] = nn->r_vecs[i].tx_pkts;
 			data[2] = nn->r_vecs[i].tx_busy;
-			tmp[5] = nn->r_vecs[i].hw_csum_tx;
-			tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
-			tmp[7] = nn->r_vecs[i].tx_gather;
-			tmp[8] = nn->r_vecs[i].tx_lso;
-			tmp[9] = nn->r_vecs[i].hw_tls_tx;
-			tmp[10] = nn->r_vecs[i].tls_tx_fallback;
-			tmp[11] = nn->r_vecs[i].tls_tx_no_fallback;
+			tmp[6] = nn->r_vecs[i].hw_csum_tx;
+			tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
+			tmp[8] = nn->r_vecs[i].tx_gather;
+			tmp[9] = nn->r_vecs[i].tx_lso;
+			tmp[10] = nn->r_vecs[i].hw_tls_tx;
+			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
+			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
 		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

 		data += NN_RVEC_PER_Q_STATS;
...